llama-stack-api 0.4.3__py3-none-any.whl → 0.4.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack_api/__init__.py +945 -0
- llama_stack_api/admin/__init__.py +45 -0
- llama_stack_api/admin/api.py +72 -0
- llama_stack_api/admin/fastapi_routes.py +117 -0
- llama_stack_api/admin/models.py +113 -0
- llama_stack_api/agents.py +173 -0
- llama_stack_api/batches/__init__.py +40 -0
- llama_stack_api/batches/api.py +53 -0
- llama_stack_api/batches/fastapi_routes.py +113 -0
- llama_stack_api/batches/models.py +78 -0
- llama_stack_api/benchmarks/__init__.py +43 -0
- llama_stack_api/benchmarks/api.py +39 -0
- llama_stack_api/benchmarks/fastapi_routes.py +109 -0
- llama_stack_api/benchmarks/models.py +109 -0
- llama_stack_api/common/__init__.py +5 -0
- llama_stack_api/common/content_types.py +101 -0
- llama_stack_api/common/errors.py +95 -0
- llama_stack_api/common/job_types.py +38 -0
- llama_stack_api/common/responses.py +77 -0
- llama_stack_api/common/training_types.py +47 -0
- llama_stack_api/common/type_system.py +146 -0
- llama_stack_api/connectors.py +146 -0
- llama_stack_api/conversations.py +270 -0
- llama_stack_api/datasetio.py +55 -0
- llama_stack_api/datasets/__init__.py +61 -0
- llama_stack_api/datasets/api.py +35 -0
- llama_stack_api/datasets/fastapi_routes.py +104 -0
- llama_stack_api/datasets/models.py +152 -0
- llama_stack_api/datatypes.py +373 -0
- llama_stack_api/eval.py +137 -0
- llama_stack_api/file_processors/__init__.py +27 -0
- llama_stack_api/file_processors/api.py +64 -0
- llama_stack_api/file_processors/fastapi_routes.py +78 -0
- llama_stack_api/file_processors/models.py +42 -0
- llama_stack_api/files/__init__.py +35 -0
- llama_stack_api/files/api.py +51 -0
- llama_stack_api/files/fastapi_routes.py +124 -0
- llama_stack_api/files/models.py +107 -0
- llama_stack_api/inference.py +1169 -0
- llama_stack_api/inspect_api/__init__.py +37 -0
- llama_stack_api/inspect_api/api.py +25 -0
- llama_stack_api/inspect_api/fastapi_routes.py +76 -0
- llama_stack_api/inspect_api/models.py +28 -0
- llama_stack_api/internal/__init__.py +9 -0
- llama_stack_api/internal/kvstore.py +28 -0
- llama_stack_api/internal/sqlstore.py +81 -0
- llama_stack_api/models.py +171 -0
- llama_stack_api/openai_responses.py +1468 -0
- llama_stack_api/post_training.py +370 -0
- llama_stack_api/prompts.py +203 -0
- llama_stack_api/providers/__init__.py +33 -0
- llama_stack_api/providers/api.py +16 -0
- llama_stack_api/providers/fastapi_routes.py +57 -0
- llama_stack_api/providers/models.py +24 -0
- llama_stack_api/rag_tool.py +168 -0
- llama_stack_api/resource.py +37 -0
- llama_stack_api/router_utils.py +160 -0
- llama_stack_api/safety.py +132 -0
- llama_stack_api/schema_utils.py +208 -0
- llama_stack_api/scoring.py +93 -0
- llama_stack_api/scoring_functions.py +211 -0
- llama_stack_api/shields.py +93 -0
- llama_stack_api/tools.py +226 -0
- llama_stack_api/vector_io.py +941 -0
- llama_stack_api/vector_stores.py +53 -0
- llama_stack_api/version.py +9 -0
- {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.4.4.dist-info}/METADATA +1 -1
- llama_stack_api-0.4.4.dist-info/RECORD +70 -0
- llama_stack_api-0.4.4.dist-info/top_level.txt +1 -0
- llama_stack_api-0.4.3.dist-info/RECORD +0 -4
- llama_stack_api-0.4.3.dist-info/top_level.txt +0 -1
- {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.4.4.dist-info}/WHEEL +0 -0
llama_stack_api/datatypes.py ADDED
@@ -0,0 +1,373 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from enum import Enum, EnumMeta, StrEnum
from typing import Any, Protocol
from urllib.parse import urlparse

from pydantic import BaseModel, Field

from llama_stack_api.benchmarks import Benchmark
from llama_stack_api.datasets import Dataset
from llama_stack_api.models import Model
from llama_stack_api.schema_utils import json_schema_type
from llama_stack_api.scoring_functions import ScoringFn
from llama_stack_api.shields import Shield
from llama_stack_api.tools import ToolGroup
from llama_stack_api.vector_stores import VectorStore


class DynamicApiMeta(EnumMeta):
    def __new__(cls, name, bases, namespace):
        # Store the original enum values
        original_values = {k: v for k, v in namespace.items() if not k.startswith("_")}

        # Create the enum class
        cls = super().__new__(cls, name, bases, namespace)

        # Store the original values for reference
        cls._original_values = original_values
        # Initialize _dynamic_values
        cls._dynamic_values = {}

        return cls

    def __call__(cls, value):
        try:
            return super().__call__(value)
        except ValueError as e:
            # If this value was already dynamically added, return it
            if value in cls._dynamic_values:
                return cls._dynamic_values[value]

            # If the value doesn't exist, create a new enum member
            # Create a new member name from the value
            member_name = value.lower().replace("-", "_")

            # If this member name already exists in the enum, return the existing member
            if member_name in cls._member_map_:
                return cls._member_map_[member_name]

            # Instead of creating a new member, raise ValueError to force users to use Api.add() to
            # register new APIs explicitly
            raise ValueError(f"API '{value}' does not exist. Use Api.add() to register new APIs.") from e

    def __iter__(cls):
        # Allow iteration over both static and dynamic members
        yield from super().__iter__()
        if hasattr(cls, "_dynamic_values"):
            yield from cls._dynamic_values.values()

    def add(cls, value):
        """
        Add a new API to the enum.
        Used to register external APIs.
        """
        member_name = value.lower().replace("-", "_")

        # If this member name already exists in the enum, return it
        if member_name in cls._member_map_:
            return cls._member_map_[member_name]

        # Create a new enum member
        member = object.__new__(cls)
        member._name_ = member_name
        member._value_ = value

        # Add it to the enum class
        cls._member_map_[member_name] = member
        cls._member_names_.append(member_name)
        cls._member_type_ = str

        # Store it in our dynamic values
        cls._dynamic_values[value] = member

        return member


@json_schema_type
class Api(Enum, metaclass=DynamicApiMeta):
    """Enumeration of all available APIs in the Llama Stack system.
    :cvar providers: Provider management and configuration
    :cvar inference: Text generation, chat completions, and embeddings
    :cvar safety: Content moderation and safety shields
    :cvar agents: Agent orchestration and execution
    :cvar batches: Batch processing for asynchronous API requests
    :cvar vector_io: Vector database operations and queries
    :cvar datasetio: Dataset input/output operations
    :cvar scoring: Model output evaluation and scoring
    :cvar eval: Model evaluation and benchmarking framework
    :cvar post_training: Fine-tuning and model training
    :cvar tool_runtime: Tool execution and management
    :cvar telemetry: Observability and system monitoring
    :cvar models: Model metadata and management
    :cvar shields: Safety shield implementations
    :cvar datasets: Dataset creation and management
    :cvar scoring_functions: Scoring function definitions
    :cvar benchmarks: Benchmark suite management
    :cvar tool_groups: Tool group organization
    :cvar files: File storage and management
    :cvar file_processors: File parsing and processing operations
    :cvar prompts: Prompt versions and management
    :cvar connectors: External connector management (e.g., MCP servers)
    :cvar inspect: Built-in system inspection and introspection
    """

    providers = "providers"
    inference = "inference"
    safety = "safety"
    agents = "agents"
    batches = "batches"
    vector_io = "vector_io"
    datasetio = "datasetio"
    scoring = "scoring"
    eval = "eval"
    post_training = "post_training"
    tool_runtime = "tool_runtime"

    models = "models"
    shields = "shields"
    vector_stores = "vector_stores"  # only used for routing table
    datasets = "datasets"
    scoring_functions = "scoring_functions"
    benchmarks = "benchmarks"
    tool_groups = "tool_groups"
    files = "files"
    file_processors = "file_processors"
    prompts = "prompts"
    conversations = "conversations"
    connectors = "connectors"

    # built-in API
    inspect = "inspect"
    admin = "admin"


@json_schema_type
class Error(BaseModel):
    """
    Error response from the API. Roughly follows RFC 7807.

    :param status: HTTP status code
    :param title: Error title, a short summary of the error which is invariant for an error type
    :param detail: Error detail, a longer human-readable description of the error
    :param instance: (Optional) A URL which can be used to retrieve more information about the specific occurrence of the error
    """

    status: int
    title: str
    detail: str
    instance: str | None = None


class ExternalApiSpec(BaseModel):
    """Specification for an external API implementation."""

    module: str = Field(..., description="Python module containing the API implementation")
    name: str = Field(..., description="Name of the API")
    pip_packages: list[str] = Field(default=[], description="List of pip packages to install the API")
    protocol: str = Field(..., description="Name of the protocol class for the API")


# Provider-related types (merged from providers/datatypes.py)
# NOTE: These imports are forward references to avoid circular dependencies
# They will be resolved at runtime when the classes are used


class ModelsProtocolPrivate(Protocol):
    """
    Protocol for model management.

    This allows users to register their preferred model identifiers.

    Model registration requires -
     - a provider, used to route the registration request
     - a model identifier, user's intended name for the model during inference
     - a provider model identifier, a model identifier supported by the provider

    Providers will only accept registration for provider model ids they support.

    Example,
      register: provider x my-model-id x provider-model-id
       -> Error if provider does not support provider-model-id
       -> Error if my-model-id is already registered
       -> Success if provider supports provider-model-id
      inference: my-model-id x ...
       -> Provider uses provider-model-id for inference
    """

    # this should be called `on_model_register` or something like that.
    # the provider should _not_ be able to change the object in this
    # callback
    async def register_model(self, model: Model) -> Model: ...

    async def unregister_model(self, model_id: str) -> None: ...

    # the Stack router will query each provider for their list of models
    # if a `refresh_interval_seconds` is provided, this method will be called
    # periodically to refresh the list of models
    #
    # NOTE: each model returned will be registered with the model registry. this means
    # a callback to the `register_model()` method will be made. this is duplicative and
    # may be removed in the future.
    async def list_models(self) -> list[Model] | None: ...

    async def should_refresh_models(self) -> bool: ...


class ShieldsProtocolPrivate(Protocol):
    async def register_shield(self, shield: Shield) -> None: ...

    async def unregister_shield(self, identifier: str) -> None: ...


class VectorStoresProtocolPrivate(Protocol):
    async def register_vector_store(self, vector_store: VectorStore) -> None: ...

    async def unregister_vector_store(self, vector_store_id: str) -> None: ...


class DatasetsProtocolPrivate(Protocol):
    async def register_dataset(self, dataset: Dataset) -> None: ...

    async def unregister_dataset(self, dataset_id: str) -> None: ...


class ScoringFunctionsProtocolPrivate(Protocol):
    async def list_scoring_functions(self) -> list[ScoringFn]: ...

    async def register_scoring_function(self, scoring_fn: ScoringFn) -> None: ...


class BenchmarksProtocolPrivate(Protocol):
    async def register_benchmark(self, benchmark: Benchmark) -> None: ...


class ToolGroupsProtocolPrivate(Protocol):
    async def register_toolgroup(self, toolgroup: ToolGroup) -> None: ...

    async def unregister_toolgroup(self, toolgroup_id: str) -> None: ...


@json_schema_type
class ProviderSpec(BaseModel):
    api: Api
    provider_type: str
    config_class: str = Field(
        ...,
        description="Fully-qualified classname of the config for this provider",
    )
    api_dependencies: list[Api] = Field(
        default_factory=list,
        description="Higher-level API surfaces may depend on other providers to provide their functionality",
    )
    optional_api_dependencies: list[Api] = Field(
        default_factory=list,
    )
    deprecation_warning: str | None = Field(
        default=None,
        description="If this provider is deprecated, specify the warning message here",
    )
    deprecation_error: str | None = Field(
        default=None,
        description="If this provider is deprecated and does NOT work, specify the error message here",
    )

    module: str | None = Field(
        default=None,
        description="""
Fully-qualified name of the module to import. The module is expected to have:

 - `get_adapter_impl(config, deps)`: returns the adapter implementation

Example: `module: ramalama_stack`
""",
    )

    pip_packages: list[str] = Field(
        default_factory=list,
        description="The pip dependencies needed for this implementation",
    )

    provider_data_validator: str | None = Field(
        default=None,
    )

    is_external: bool = Field(default=False, description="Notes whether this provider is an external provider.")

    # used internally by the resolver; this is a hack for now
    deps__: list[str] = Field(default_factory=list)

    @property
    def is_sample(self) -> bool:
        return self.provider_type in ("sample", "remote::sample")


class RoutingTable(Protocol):
    async def get_provider_impl(self, routing_key: str) -> Any: ...


@json_schema_type
class InlineProviderSpec(ProviderSpec):
    container_image: str | None = Field(
        default=None,
        description="""
The container image to use for this implementation. If one is provided, pip_packages will be ignored.
If a provider depends on other providers, the dependencies MUST NOT specify a container image.
""",
    )
    description: str | None = Field(
        default=None,
        description="""
A description of the provider. This is used to display in the documentation.
""",
    )


class RemoteProviderConfig(BaseModel):
    host: str = "localhost"
    port: int | None = None
    protocol: str = "http"

    @property
    def url(self) -> str:
        if self.port is None:
            return f"{self.protocol}://{self.host}"
        return f"{self.protocol}://{self.host}:{self.port}"

    @classmethod
    def from_url(cls, url: str) -> "RemoteProviderConfig":
        parsed = urlparse(url)
        attrs = {k: v for k, v in parsed._asdict().items() if v is not None}
        return cls(**attrs)


@json_schema_type
class RemoteProviderSpec(ProviderSpec):
    adapter_type: str = Field(
        ...,
        description="Unique identifier for this adapter",
    )

    description: str | None = Field(
        default=None,
        description="""
A description of the provider. This is used to display in the documentation.
""",
    )

    @property
    def container_image(self) -> str | None:
        return None


class HealthStatus(StrEnum):
    OK = "OK"
    ERROR = "Error"
    NOT_IMPLEMENTED = "Not Implemented"


HealthResponse = dict[str, Any]
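
The DynamicApiMeta metaclass above rejects lookups of unregistered values and funnels all external registration through Api.add(). A minimal sketch of that behavior, derivable from the code above; the "jukebox" API name is purely illustrative:

    from llama_stack_api.datatypes import Api

    # Unknown values are rejected by DynamicApiMeta.__call__ with a pointer to Api.add().
    try:
        Api("jukebox")  # "jukebox" is a hypothetical external API name
    except ValueError as err:
        print(err)  # API 'jukebox' does not exist. Use Api.add() to register new APIs.

    member = Api.add("jukebox")      # explicit registration creates the member
    assert Api("jukebox") is member  # later lookups resolve via _dynamic_values
    assert member in list(Api)       # __iter__ yields static and dynamically added members
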
llama_stack_api/eval.py ADDED
@@ -0,0 +1,137 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Literal, Protocol

from pydantic import BaseModel, Field

from llama_stack_api.common.job_types import Job
from llama_stack_api.inference import SamplingParams, SystemMessage
from llama_stack_api.schema_utils import json_schema_type, webmethod
from llama_stack_api.scoring import ScoringResult
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack_api.version import LLAMA_STACK_API_V1ALPHA


@json_schema_type
class ModelCandidate(BaseModel):
    """A model candidate for evaluation.

    :param model: The model ID to evaluate.
    :param sampling_params: The sampling parameters for the model.
    :param system_message: (Optional) The system message providing instructions or context to the model.
    """

    type: Literal["model"] = "model"
    model: str
    sampling_params: SamplingParams
    system_message: SystemMessage | None = None


EvalCandidate = ModelCandidate


@json_schema_type
class BenchmarkConfig(BaseModel):
    """A benchmark configuration for evaluation.

    :param eval_candidate: The candidate to evaluate.
    :param scoring_params: Map between scoring function id and parameters for each scoring function you want to run
    :param num_examples: (Optional) The number of examples to evaluate. If not provided, all examples in the dataset will be evaluated
    """

    eval_candidate: EvalCandidate
    scoring_params: dict[str, ScoringFnParams] = Field(
        description="Map between scoring function id and parameters for each scoring function you want to run",
        default_factory=dict,
    )
    num_examples: int | None = Field(
        description="Number of examples to evaluate (useful for testing), if not provided, all examples in the dataset will be evaluated",
        default=None,
    )
    # we could optionally add any specific dataset config here


@json_schema_type
class EvaluateResponse(BaseModel):
    """The response from an evaluation.

    :param generations: The generations from the evaluation.
    :param scores: The scores from the evaluation.
    """

    generations: list[dict[str, Any]]
    # each key in the dict is a scoring function name
    scores: dict[str, ScoringResult]


class Eval(Protocol):
    """Evaluations

    Llama Stack Evaluation API for running evaluations on model and agent candidates."""

    @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs", method="POST", level=LLAMA_STACK_API_V1ALPHA)
    async def run_eval(
        self,
        benchmark_id: str,
        benchmark_config: BenchmarkConfig,
    ) -> Job:
        """Run an evaluation on a benchmark.

        :param benchmark_id: The ID of the benchmark to run the evaluation on.
        :param benchmark_config: The configuration for the benchmark.
        :returns: The job that was created to run the evaluation.
        """
        ...

    @webmethod(route="/eval/benchmarks/{benchmark_id}/evaluations", method="POST", level=LLAMA_STACK_API_V1ALPHA)
    async def evaluate_rows(
        self,
        benchmark_id: str,
        input_rows: list[dict[str, Any]],
        scoring_functions: list[str],
        benchmark_config: BenchmarkConfig,
    ) -> EvaluateResponse:
        """Evaluate a list of rows on a benchmark.

        :param benchmark_id: The ID of the benchmark to run the evaluation on.
        :param input_rows: The rows to evaluate.
        :param scoring_functions: The scoring functions to use for the evaluation.
        :param benchmark_config: The configuration for the benchmark.
        :returns: EvaluateResponse object containing generations and scores.
        """
        ...

    @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA)
    async def job_status(self, benchmark_id: str, job_id: str) -> Job:
        """Get the status of a job.

        :param benchmark_id: The ID of the benchmark to run the evaluation on.
        :param job_id: The ID of the job to get the status of.
        :returns: The status of the evaluation job.
        """
        ...

    @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}", method="DELETE", level=LLAMA_STACK_API_V1ALPHA)
    async def job_cancel(self, benchmark_id: str, job_id: str) -> None:
        """Cancel a job.

        :param benchmark_id: The ID of the benchmark to run the evaluation on.
        :param job_id: The ID of the job to cancel.
        """
        ...

    @webmethod(
        route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result", method="GET", level=LLAMA_STACK_API_V1ALPHA
    )
    async def job_result(self, benchmark_id: str, job_id: str) -> EvaluateResponse:
        """Get the result of a job.

        :param benchmark_id: The ID of the benchmark to run the evaluation on.
        :param job_id: The ID of the job to get the result of.
        :returns: The result of the job.
        """
        ...
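
The Eval routes above form a submit-then-poll workflow: create a job under a benchmark, poll its status, then fetch results (evaluate_rows covers the synchronous case). A hedged sketch of driving the protocol from Python; it assumes an impl object satisfying Eval, an already-registered benchmark, a default-constructible SamplingParams, and a job_id field on the Job model from common/job_types.py (not shown in this section):

    from llama_stack_api.eval import BenchmarkConfig, Eval, ModelCandidate
    from llama_stack_api.inference import SamplingParams

    async def run_benchmark(impl: Eval) -> None:
        # "my-benchmark" and "my-model-id" are placeholder identifiers.
        config = BenchmarkConfig(
            eval_candidate=ModelCandidate(model="my-model-id", sampling_params=SamplingParams()),
            num_examples=10,  # small sample while testing; omit to evaluate the full dataset
        )
        job = await impl.run_eval("my-benchmark", config)
        print(await impl.job_status("my-benchmark", job.job_id))  # job_id field assumed
        result = await impl.job_result("my-benchmark", job.job_id)
        print(result.scores)  # one ScoringResult per scoring function
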
llama_stack_api/file_processors/__init__.py ADDED
@@ -0,0 +1,27 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

"""File Processors API protocol and models.

This module contains the File Processors protocol definition.
Pydantic models are defined in llama_stack_api.file_processors.models.
The FastAPI router is defined in llama_stack_api.file_processors.fastapi_routes.
"""

# Import fastapi_routes for router factory access
from . import fastapi_routes

# Import protocol for re-export
from .api import FileProcessors

# Import models for re-export
from .models import ProcessFileResponse

__all__ = [
    "FileProcessors",
    "ProcessFileResponse",
    "fastapi_routes",
]
llama_stack_api/file_processors/api.py ADDED
@@ -0,0 +1,64 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Protocol, runtime_checkable

from fastapi import UploadFile

from llama_stack_api.vector_io import VectorStoreChunkingStrategy

from .models import ProcessFileResponse


@runtime_checkable
class FileProcessors(Protocol):
    """
    File Processor API for converting files into structured, processable content.

    This API provides a flexible interface for processing various file formats
    (PDFs, documents, images, etc.) into normalized text content that can be used for
    vector store ingestion, RAG applications, or standalone content extraction.

    The API focuses on parsing and normalization:
    - Multiple file formats through extensible provider architecture
    - Multipart form uploads or file ID references
    - Configurable processing options per provider
    - Optional chunking using provider's native capabilities
    - Rich metadata about processing results

    For embedding generation, use the chunks from this API with the separate
    embedding API to maintain clean separation of concerns.

    Future providers can extend this interface to support additional formats,
    processing capabilities, and optimization strategies.
    """

    async def process_file(
        self,
        file: UploadFile | None = None,
        file_id: str | None = None,
        options: dict[str, Any] | None = None,
        chunking_strategy: VectorStoreChunkingStrategy | None = None,
    ) -> ProcessFileResponse:
        """
        Process a file into chunks ready for vector database storage.

        This method supports two modes of operation via multipart form request:
        1. Direct upload: Upload and process a file directly (file parameter)
        2. File storage: Process files already uploaded to file storage (file_id parameter)

        Exactly one of file or file_id must be provided.

        If no chunking_strategy is provided, the entire file content is returned as a single chunk.
        If chunking_strategy is provided, the file is split according to the strategy.

        :param file: The uploaded file object containing content and metadata (filename, content_type, etc.). Mutually exclusive with file_id.
        :param file_id: ID of file already uploaded to file storage. Mutually exclusive with file.
        :param options: Provider-specific processing options (e.g., OCR settings, output format).
        :param chunking_strategy: Optional strategy for splitting content into chunks.
        :returns: ProcessFileResponse with chunks ready for vector database storage.
        """
        ...
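
Because FileProcessors is a runtime_checkable Protocol, providers satisfy it structurally rather than by inheritance. A minimal sketch of an implementation skeleton; PassthroughFileProcessor is illustrative, and the construction of ProcessFileResponse is elided because its fields live in file_processors/models.py, which this section does not show:

    from typing import Any

    from fastapi import UploadFile

    from llama_stack_api.file_processors import FileProcessors, ProcessFileResponse
    from llama_stack_api.vector_io import VectorStoreChunkingStrategy

    class PassthroughFileProcessor:
        async def process_file(
            self,
            file: UploadFile | None = None,
            file_id: str | None = None,
            options: dict[str, Any] | None = None,
            chunking_strategy: VectorStoreChunkingStrategy | None = None,
        ) -> ProcessFileResponse:
            # Enforce the documented contract: exactly one of file / file_id.
            if (file is None) == (file_id is None):
                raise ValueError("Exactly one of file or file_id must be provided")
            # A real provider would parse the bytes and build a ProcessFileResponse here.
            raise NotImplementedError

    # Structural check: no subclassing of FileProcessors is required.
    assert isinstance(PassthroughFileProcessor(), FileProcessors)
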
llama_stack_api/file_processors/fastapi_routes.py ADDED
@@ -0,0 +1,78 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

"""FastAPI router for the File Processors API.

This module defines the FastAPI router for the File Processors API using standard
FastAPI route decorators. The router is defined in the API package to keep
all API-related code together.
"""

from typing import Annotated, Any

from fastapi import APIRouter, File, Form, UploadFile

from llama_stack_api.router_utils import standard_responses
from llama_stack_api.vector_io import VectorStoreChunkingStrategy
from llama_stack_api.version import LLAMA_STACK_API_V1ALPHA

from .api import FileProcessors
from .models import ProcessFileResponse


def create_router(impl: FileProcessors) -> APIRouter:
    """Create a FastAPI router for the File Processors API.

    Args:
        impl: The FileProcessors implementation instance

    Returns:
        APIRouter configured for the File Processors API
    """
    router = APIRouter(
        prefix=f"/{LLAMA_STACK_API_V1ALPHA}",
        tags=["File Processors"],
        responses=standard_responses,
    )

    @router.post(
        "/file-processors/process",
        response_model=ProcessFileResponse,
        summary="Process a file into chunks ready for vector database storage.",
        description="Process a file into chunks ready for vector database storage. Supports direct upload via multipart form or processing files already uploaded to file storage via file_id. Exactly one of file or file_id must be provided.",
        responses={
            200: {"description": "The processed file chunks."},
        },
    )
    async def process_file(
        file: Annotated[
            UploadFile | None,
            File(description="The File object to be uploaded and processed. Mutually exclusive with file_id."),
        ] = None,
        file_id: Annotated[
            str | None, Form(description="ID of file already uploaded to file storage. Mutually exclusive with file.")
        ] = None,
        options: Annotated[
            dict[str, Any] | None,
            Form(
                description="Optional processing options. Provider-specific parameters (e.g., OCR settings, output format)."
            ),
        ] = None,
        chunking_strategy: Annotated[
            VectorStoreChunkingStrategy | None,
            Form(description="Optional chunking strategy for splitting content into chunks."),
        ] = None,
    ) -> ProcessFileResponse:
        # Pass the parameters directly to the implementation
        # The protocol method signature expects individual parameters for multipart handling
        return await impl.process_file(
            file=file,
            file_id=file_id,
            options=options,
            chunking_strategy=chunking_strategy,
        )

    return router
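
create_router closes over the implementation instance, so wiring the API into an application is a single include_router call. A sketch under the assumption that LLAMA_STACK_API_V1ALPHA renders as a path segment such as v1alpha; StubProcessor is a hypothetical stand-in for a real provider, along the lines of the structural implementation sketched earlier:

    from typing import Any

    from fastapi import FastAPI, UploadFile

    from llama_stack_api.file_processors import ProcessFileResponse
    from llama_stack_api.file_processors.fastapi_routes import create_router

    class StubProcessor:
        # Minimal object satisfying the FileProcessors protocol; a real
        # provider would parse the file and return populated chunks.
        async def process_file(
            self,
            file: UploadFile | None = None,
            file_id: str | None = None,
            options: dict[str, Any] | None = None,
            chunking_strategy=None,
        ) -> ProcessFileResponse:
            raise NotImplementedError

    app = FastAPI()
    app.include_router(create_router(impl=StubProcessor()))
    # The endpoint is now mounted at POST /{LLAMA_STACK_API_V1ALPHA}/file-processors/process.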