llama-stack-api 0.4.3__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. llama_stack_api/__init__.py +945 -0
  2. llama_stack_api/admin/__init__.py +45 -0
  3. llama_stack_api/admin/api.py +72 -0
  4. llama_stack_api/admin/fastapi_routes.py +117 -0
  5. llama_stack_api/admin/models.py +113 -0
  6. llama_stack_api/agents.py +173 -0
  7. llama_stack_api/batches/__init__.py +40 -0
  8. llama_stack_api/batches/api.py +53 -0
  9. llama_stack_api/batches/fastapi_routes.py +113 -0
  10. llama_stack_api/batches/models.py +78 -0
  11. llama_stack_api/benchmarks/__init__.py +43 -0
  12. llama_stack_api/benchmarks/api.py +39 -0
  13. llama_stack_api/benchmarks/fastapi_routes.py +109 -0
  14. llama_stack_api/benchmarks/models.py +109 -0
  15. llama_stack_api/common/__init__.py +5 -0
  16. llama_stack_api/common/content_types.py +101 -0
  17. llama_stack_api/common/errors.py +95 -0
  18. llama_stack_api/common/job_types.py +38 -0
  19. llama_stack_api/common/responses.py +77 -0
  20. llama_stack_api/common/training_types.py +47 -0
  21. llama_stack_api/common/type_system.py +146 -0
  22. llama_stack_api/connectors.py +146 -0
  23. llama_stack_api/conversations.py +270 -0
  24. llama_stack_api/datasetio.py +55 -0
  25. llama_stack_api/datasets/__init__.py +61 -0
  26. llama_stack_api/datasets/api.py +35 -0
  27. llama_stack_api/datasets/fastapi_routes.py +104 -0
  28. llama_stack_api/datasets/models.py +152 -0
  29. llama_stack_api/datatypes.py +373 -0
  30. llama_stack_api/eval.py +137 -0
  31. llama_stack_api/file_processors/__init__.py +27 -0
  32. llama_stack_api/file_processors/api.py +64 -0
  33. llama_stack_api/file_processors/fastapi_routes.py +78 -0
  34. llama_stack_api/file_processors/models.py +42 -0
  35. llama_stack_api/files/__init__.py +35 -0
  36. llama_stack_api/files/api.py +51 -0
  37. llama_stack_api/files/fastapi_routes.py +124 -0
  38. llama_stack_api/files/models.py +107 -0
  39. llama_stack_api/inference.py +1169 -0
  40. llama_stack_api/inspect_api/__init__.py +37 -0
  41. llama_stack_api/inspect_api/api.py +25 -0
  42. llama_stack_api/inspect_api/fastapi_routes.py +76 -0
  43. llama_stack_api/inspect_api/models.py +28 -0
  44. llama_stack_api/internal/__init__.py +9 -0
  45. llama_stack_api/internal/kvstore.py +28 -0
  46. llama_stack_api/internal/sqlstore.py +81 -0
  47. llama_stack_api/models.py +171 -0
  48. llama_stack_api/openai_responses.py +1468 -0
  49. llama_stack_api/post_training.py +370 -0
  50. llama_stack_api/prompts.py +203 -0
  51. llama_stack_api/providers/__init__.py +33 -0
  52. llama_stack_api/providers/api.py +16 -0
  53. llama_stack_api/providers/fastapi_routes.py +57 -0
  54. llama_stack_api/providers/models.py +24 -0
  55. llama_stack_api/rag_tool.py +168 -0
  56. llama_stack_api/resource.py +37 -0
  57. llama_stack_api/router_utils.py +160 -0
  58. llama_stack_api/safety.py +132 -0
  59. llama_stack_api/schema_utils.py +208 -0
  60. llama_stack_api/scoring.py +93 -0
  61. llama_stack_api/scoring_functions.py +211 -0
  62. llama_stack_api/shields.py +93 -0
  63. llama_stack_api/tools.py +226 -0
  64. llama_stack_api/vector_io.py +941 -0
  65. llama_stack_api/vector_stores.py +53 -0
  66. llama_stack_api/version.py +9 -0
  67. {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.4.4.dist-info}/METADATA +1 -1
  68. llama_stack_api-0.4.4.dist-info/RECORD +70 -0
  69. llama_stack_api-0.4.4.dist-info/top_level.txt +1 -0
  70. llama_stack_api-0.4.3.dist-info/RECORD +0 -4
  71. llama_stack_api-0.4.3.dist-info/top_level.txt +0 -1
  72. {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.4.4.dist-info}/WHEEL +0 -0
@@ -0,0 +1,37 @@
1
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

"""Inspect API protocol and models.

This module contains the Inspect protocol definition.
Pydantic models are defined in llama_stack_api.inspect_api.models.
The FastAPI router is defined in llama_stack_api.inspect_api.fastapi_routes.
"""

# Import fastapi_routes for router factory access
from . import fastapi_routes

# Import protocol for re-export
from .api import Inspect

# Import models for re-export
from .models import (
    ApiFilter,
    HealthInfo,
    ListRoutesResponse,
    RouteInfo,
    VersionInfo,
)

__all__ = [
    "Inspect",
    "ApiFilter",
    "HealthInfo",
    "ListRoutesResponse",
    "RouteInfo",
    "VersionInfo",
    "fastapi_routes",
]
@@ -0,0 +1,25 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from typing import Protocol, runtime_checkable
8
+
9
+ from .models import (
10
+ ApiFilter,
11
+ HealthInfo,
12
+ ListRoutesResponse,
13
+ VersionInfo,
14
+ )
15
+
16
+
17
@runtime_checkable
class Inspect(Protocol):
    """APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers."""

    async def list_routes(self, api_filter: ApiFilter | None = None) -> ListRoutesResponse:
        """List available API routes, optionally narrowed by *api_filter*."""
        ...

    async def health(self) -> HealthInfo:
        """Return the current health status of the service."""
        ...

    async def version(self) -> VersionInfo:
        """Return the version of the service."""
        ...
@@ -0,0 +1,76 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ """FastAPI router for the Inspect API.
8
+
9
+ This module defines the FastAPI router for the Inspect API using standard
10
+ FastAPI route decorators.
11
+ """
12
+
13
+ from typing import Annotated
14
+
15
+ from fastapi import APIRouter, Query
16
+
17
+ from llama_stack_api.router_utils import PUBLIC_ROUTE_KEY, standard_responses
18
+ from llama_stack_api.version import LLAMA_STACK_API_V1
19
+
20
+ from .api import Inspect
21
+ from .models import (
22
+ ApiFilter,
23
+ HealthInfo,
24
+ ListRoutesResponse,
25
+ VersionInfo,
26
+ )
27
+
28
+
29
def create_router(impl: Inspect) -> APIRouter:
    """Create a FastAPI router for the Inspect API.

    :param impl: Object implementing the :class:`Inspect` protocol; every
        route handler delegates directly to it.
    :returns: An ``APIRouter`` exposing the routes/health/version endpoints
        under the v1 API prefix.
    """
    router = APIRouter(
        prefix=f"/{LLAMA_STACK_API_V1}",
        tags=["Inspect"],
        responses=standard_responses,
    )

    @router.get(
        "/inspect/routes",
        response_model=ListRoutesResponse,
        summary="List routes.",
        description="List all available API routes with their methods and implementing providers.",
        responses={200: {"description": "Response containing information about all available routes."}},
    )
    async def list_routes(
        api_filter: Annotated[
            ApiFilter | None,
            Query(
                description="Optional filter to control which routes are returned. Can be an API level ('v1', 'v1alpha', 'v1beta') to show non-deprecated routes at that level, or 'deprecated' to show deprecated routes across all levels. If not specified, returns all non-deprecated routes."
            ),
        ] = None,
    ) -> ListRoutesResponse:
        # Thin delegation: all filtering logic lives in the implementation.
        return await impl.list_routes(api_filter)

    # PUBLIC_ROUTE_KEY tags health/version in the route's openapi_extra —
    # presumably marking them as unauthenticated/public; confirm in router_utils.
    @router.get(
        "/health",
        response_model=HealthInfo,
        summary="Get health status.",
        description="Get the current health status of the service.",
        responses={200: {"description": "Health information indicating if the service is operational."}},
        openapi_extra={PUBLIC_ROUTE_KEY: True},
    )
    async def health() -> HealthInfo:
        return await impl.health()

    @router.get(
        "/version",
        response_model=VersionInfo,
        summary="Get version.",
        description="Get the version of the service.",
        responses={200: {"description": "Version information containing the service version number."}},
        openapi_extra={PUBLIC_ROUTE_KEY: True},
    )
    async def version() -> VersionInfo:
        return await impl.version()

    return router
@@ -0,0 +1,28 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ """Pydantic models for Inspect API requests and responses.
8
+
9
+ This module re-exports models from llama_stack_api.admin.models to ensure
10
+ a single source of truth and avoid type conflicts.
11
+ """
12
+
13
+ # Import and re-export shared models from admin
14
+ from llama_stack_api.admin.models import (
15
+ ApiFilter,
16
+ HealthInfo,
17
+ ListRoutesResponse,
18
+ RouteInfo,
19
+ VersionInfo,
20
+ )
21
+
22
+ __all__ = [
23
+ "ApiFilter",
24
+ "RouteInfo",
25
+ "HealthInfo",
26
+ "VersionInfo",
27
+ "ListRoutesResponse",
28
+ ]
@@ -0,0 +1,9 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ # Internal subpackage for shared interfaces that are not part of the public API.
8
+
9
+ __all__: list[str] = []
@@ -0,0 +1,28 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from datetime import datetime
8
+ from typing import Protocol
9
+
10
+
11
class KVStore(Protocol):
    """Protocol for simple key/value storage backends."""

    # TODO: make the value type bytes instead of str
    async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
        """Store *value* under *key*, optionally expiring at *expiration*."""
        ...

    async def get(self, key: str) -> str | None:
        """Return the value stored under *key*, or ``None`` if absent."""
        ...

    async def delete(self, key: str) -> None:
        """Remove *key* from the store."""
        ...

    async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
        """Return the values whose keys fall between *start_key* and *end_key*.

        NOTE(review): endpoint inclusivity is not specified here — confirm
        against the concrete backend implementations.
        """
        ...

    async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
        """Return the keys between *start_key* and *end_key* (see note above on inclusivity)."""
        ...

    async def shutdown(self) -> None:
        """Release any resources held by the backend."""
        ...


__all__ = ["KVStore"]
@@ -0,0 +1,81 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from collections.abc import Mapping, Sequence
8
+ from enum import Enum
9
+ from typing import Any, Literal, Protocol
10
+
11
+ from pydantic import BaseModel
12
+
13
+ from llama_stack_api import PaginatedResponse
14
+
15
+
16
class ColumnType(Enum):
    """Logical column types accepted by SqlStore table schemas."""

    INTEGER = "INTEGER"
    STRING = "STRING"
    TEXT = "TEXT"
    FLOAT = "FLOAT"
    BOOLEAN = "BOOLEAN"
    JSON = "JSON"
    DATETIME = "DATETIME"
24
+
25
+
26
class ColumnDefinition(BaseModel):
    """Full column specification for ``SqlStore.create_table``.

    Use this instead of a bare :class:`ColumnType` when a column needs
    key / nullability / default settings.
    """

    type: ColumnType
    # Whether this column is (part of) the table's primary key.
    primary_key: bool = False
    nullable: bool = True
    # Default value applied when no value is supplied on insert.
    default: Any = None
31
+
32
+
33
class SqlStore(Protocol):
    """Protocol for common SQL-store functionality."""

    async def create_table(self, table: str, schema: Mapping[str, ColumnType | ColumnDefinition]) -> None:
        """Create *table* with the given column schema."""
        ...

    async def insert(self, table: str, data: Mapping[str, Any] | Sequence[Mapping[str, Any]]) -> None:
        """Insert one row (mapping) or many rows (sequence of mappings) into *table*."""
        ...

    async def upsert(
        self,
        table: str,
        data: Mapping[str, Any],
        conflict_columns: list[str],
        update_columns: list[str] | None = None,
    ) -> None:
        """Insert *data*, updating on conflict over *conflict_columns*.

        :param update_columns: Columns to overwrite on conflict; semantics of
            ``None`` (all columns vs. none) are backend-defined — confirm with
            the concrete implementations.
        """
        ...

    async def fetch_all(
        self,
        table: str,
        where: Mapping[str, Any] | None = None,
        where_sql: str | None = None,
        limit: int | None = None,
        order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
        cursor: tuple[str, str] | None = None,
    ) -> PaginatedResponse: ...
    # fetch_all: *where* is structured equality filtering; *where_sql* is a raw
    # SQL predicate (NOTE(review): ensure implementations never interpolate
    # untrusted input into it). *cursor* looks like (column, value) keyset
    # pagination — confirm against implementations.

    async def fetch_one(
        self,
        table: str,
        where: Mapping[str, Any] | None = None,
        where_sql: str | None = None,
        order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
    ) -> dict[str, Any] | None:
        """Return the first matching row as a dict, or ``None`` if no row matches."""
        ...

    async def update(self, table: str, data: Mapping[str, Any], where: Mapping[str, Any]) -> None:
        """Set the columns in *data* on all rows matching *where*."""
        ...

    async def delete(self, table: str, where: Mapping[str, Any]) -> None:
        """Delete all rows matching *where*."""
        ...

    async def add_column_if_not_exists(
        self,
        table: str,
        column_name: str,
        column_type: ColumnType,
        nullable: bool = True,
    ) -> None:
        """Add *column_name* to *table* if it is not already present (lightweight migration)."""
        ...

    async def shutdown(self) -> None:
        """Release any resources held by the store."""
        ...


__all__ = ["ColumnDefinition", "ColumnType", "SqlStore"]
@@ -0,0 +1,171 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from enum import StrEnum
8
+ from typing import Any, Literal, Protocol, runtime_checkable
9
+
10
+ from pydantic import BaseModel, ConfigDict, Field, field_validator
11
+
12
+ from llama_stack_api.resource import Resource, ResourceType
13
+ from llama_stack_api.schema_utils import json_schema_type, webmethod
14
+ from llama_stack_api.version import LLAMA_STACK_API_V1
15
+
16
+
17
class CommonModelFields(BaseModel):
    """Fields shared by model resources and model registration inputs."""

    metadata: dict[str, Any] = Field(
        default_factory=dict,
        description="Any additional metadata for this model",
    )
22
+
23
+
24
@json_schema_type
class ModelType(StrEnum):
    """Enumeration of supported model types in Llama Stack.

    :cvar llm: Large language model for text generation and completion
    :cvar embedding: Embedding model for converting text to vector representations
    :cvar rerank: Reranking model for reordering documents based on their relevance to a query
    """

    llm = "llm"
    embedding = "embedding"
    rerank = "rerank"
35
+
36
+
37
@json_schema_type
class Model(CommonModelFields, Resource):
    """A model resource representing an AI model registered in Llama Stack.

    :param type: The resource type, always 'model' for model resources
    :param model_type: The type of model (LLM or embedding model)
    :param metadata: Any additional metadata for this model
    :param identifier: Unique identifier for this resource in llama stack
    :param provider_resource_id: Unique identifier for this resource in the provider
    :param provider_id: ID of the provider that owns this resource
    """

    type: Literal[ResourceType.model] = ResourceType.model

    @property
    def model_id(self) -> str:
        """Alias for the stack-wide identifier of this model."""
        return self.identifier

    @property
    def provider_model_id(self) -> str:
        """The provider-side identifier of this model.

        The field validator below rejects None at construction time, so this
        guard should never fire; it is an explicit raise rather than an
        ``assert`` because asserts are stripped under ``python -O``, which
        would let the method silently return None despite its ``str`` return
        annotation.
        """
        if self.provider_resource_id is None:
            raise ValueError("Provider resource ID must be set")
        return self.provider_resource_id

    # Allow "model_"-prefixed field names (Pydantic reserves that prefix by default).
    model_config = ConfigDict(protected_namespaces=())

    model_type: ModelType = Field(default=ModelType.llm)

    @field_validator("provider_resource_id")
    @classmethod
    def validate_provider_resource_id(cls, v: str | None) -> str:
        """Reject None so provider_resource_id is always present on a Model."""
        if v is None:
            raise ValueError("provider_resource_id cannot be None")
        return v
70
+
71
+
72
class ModelInput(CommonModelFields):
    """Input schema for registering a model; provider fields are optional."""

    model_id: str
    provider_id: str | None = None
    provider_model_id: str | None = None
    model_type: ModelType | None = ModelType.llm
    # Allow "model_"-prefixed field names (Pydantic reserves that prefix by default).
    model_config = ConfigDict(protected_namespaces=())
78
+
79
+
80
class ListModelsResponse(BaseModel):
    """Response payload for ``Models.list_models``."""

    data: list[Model]
82
+
83
+
84
@json_schema_type
class OpenAIModel(BaseModel):
    """A model from OpenAI.

    :param id: The ID of the model
    :param object: The object type, which will be "model"
    :param created: The Unix timestamp in seconds when the model was created
    :param owned_by: The owner of the model
    :param custom_metadata: Llama Stack-specific metadata including model_type, provider info, and additional metadata
    """

    id: str
    object: Literal["model"] = "model"
    created: int
    owned_by: str
    custom_metadata: dict[str, Any] | None = None
100
+
101
+
102
@json_schema_type
class OpenAIListModelsResponse(BaseModel):
    """Response payload for ``Models.openai_list_models``."""

    data: list[OpenAIModel]
105
+
106
+
107
@runtime_checkable
class Models(Protocol):
    """Protocol for the model registry: list, get, register, and unregister models."""

    # NOTE(review): unlike the sibling methods, list_models carries no
    # @webmethod decorator — presumably it is not exposed over HTTP; confirm.
    async def list_models(self) -> ListModelsResponse:
        """List all models.

        :returns: A ListModelsResponse.
        """
        ...

    @webmethod(route="/models", method="GET", level=LLAMA_STACK_API_V1)
    async def openai_list_models(self) -> OpenAIListModelsResponse:
        """List models using the OpenAI API.

        :returns: A OpenAIListModelsResponse.
        """
        ...

    @webmethod(route="/models/{model_id:path}", method="GET", level=LLAMA_STACK_API_V1)
    async def get_model(
        self,
        model_id: str,
    ) -> Model:
        """Get model.

        Get a model by its identifier.

        :param model_id: The identifier of the model to get.
        :returns: A Model.
        """
        ...

    # Registration/unregistration routes are marked deprecated=True.
    @webmethod(route="/models", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
    async def register_model(
        self,
        model_id: str,
        provider_model_id: str | None = None,
        provider_id: str | None = None,
        metadata: dict[str, Any] | None = None,
        model_type: ModelType | None = None,
    ) -> Model:
        """Register model.

        Register a model.

        :param model_id: The identifier of the model to register.
        :param provider_model_id: The identifier of the model in the provider.
        :param provider_id: The identifier of the provider.
        :param metadata: Any additional metadata for this model.
        :param model_type: The type of model to register.
        :returns: A Model.
        """
        ...

    @webmethod(route="/models/{model_id:path}", method="DELETE", level=LLAMA_STACK_API_V1, deprecated=True)
    async def unregister_model(
        self,
        model_id: str,
    ) -> None:
        """Unregister model.

        Unregister a model.

        :param model_id: The identifier of the model to unregister.
        """
        ...