llama-stack-api 0.4.3__py3-none-any.whl → 0.5.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117) hide show
  1. llama_stack_api/__init__.py +1100 -0
  2. llama_stack_api/admin/__init__.py +45 -0
  3. llama_stack_api/admin/api.py +72 -0
  4. llama_stack_api/admin/fastapi_routes.py +117 -0
  5. llama_stack_api/admin/models.py +113 -0
  6. llama_stack_api/agents/__init__.py +38 -0
  7. llama_stack_api/agents/api.py +52 -0
  8. llama_stack_api/agents/fastapi_routes.py +268 -0
  9. llama_stack_api/agents/models.py +181 -0
  10. llama_stack_api/batches/__init__.py +40 -0
  11. llama_stack_api/batches/api.py +53 -0
  12. llama_stack_api/batches/fastapi_routes.py +113 -0
  13. llama_stack_api/batches/models.py +78 -0
  14. llama_stack_api/benchmarks/__init__.py +43 -0
  15. llama_stack_api/benchmarks/api.py +39 -0
  16. llama_stack_api/benchmarks/fastapi_routes.py +109 -0
  17. llama_stack_api/benchmarks/models.py +109 -0
  18. llama_stack_api/common/__init__.py +5 -0
  19. llama_stack_api/common/content_types.py +101 -0
  20. llama_stack_api/common/errors.py +110 -0
  21. llama_stack_api/common/job_types.py +38 -0
  22. llama_stack_api/common/responses.py +77 -0
  23. llama_stack_api/common/training_types.py +47 -0
  24. llama_stack_api/common/type_system.py +146 -0
  25. llama_stack_api/connectors/__init__.py +38 -0
  26. llama_stack_api/connectors/api.py +50 -0
  27. llama_stack_api/connectors/fastapi_routes.py +103 -0
  28. llama_stack_api/connectors/models.py +103 -0
  29. llama_stack_api/conversations/__init__.py +61 -0
  30. llama_stack_api/conversations/api.py +44 -0
  31. llama_stack_api/conversations/fastapi_routes.py +177 -0
  32. llama_stack_api/conversations/models.py +245 -0
  33. llama_stack_api/datasetio/__init__.py +34 -0
  34. llama_stack_api/datasetio/api.py +42 -0
  35. llama_stack_api/datasetio/fastapi_routes.py +94 -0
  36. llama_stack_api/datasetio/models.py +48 -0
  37. llama_stack_api/datasets/__init__.py +61 -0
  38. llama_stack_api/datasets/api.py +35 -0
  39. llama_stack_api/datasets/fastapi_routes.py +104 -0
  40. llama_stack_api/datasets/models.py +152 -0
  41. llama_stack_api/datatypes.py +373 -0
  42. llama_stack_api/eval/__init__.py +55 -0
  43. llama_stack_api/eval/api.py +51 -0
  44. llama_stack_api/eval/compat.py +300 -0
  45. llama_stack_api/eval/fastapi_routes.py +126 -0
  46. llama_stack_api/eval/models.py +141 -0
  47. llama_stack_api/file_processors/__init__.py +27 -0
  48. llama_stack_api/file_processors/api.py +64 -0
  49. llama_stack_api/file_processors/fastapi_routes.py +78 -0
  50. llama_stack_api/file_processors/models.py +42 -0
  51. llama_stack_api/files/__init__.py +35 -0
  52. llama_stack_api/files/api.py +51 -0
  53. llama_stack_api/files/fastapi_routes.py +124 -0
  54. llama_stack_api/files/models.py +107 -0
  55. llama_stack_api/inference/__init__.py +207 -0
  56. llama_stack_api/inference/api.py +93 -0
  57. llama_stack_api/inference/fastapi_routes.py +243 -0
  58. llama_stack_api/inference/models.py +1035 -0
  59. llama_stack_api/inspect_api/__init__.py +37 -0
  60. llama_stack_api/inspect_api/api.py +25 -0
  61. llama_stack_api/inspect_api/fastapi_routes.py +76 -0
  62. llama_stack_api/inspect_api/models.py +28 -0
  63. llama_stack_api/internal/__init__.py +9 -0
  64. llama_stack_api/internal/kvstore.py +28 -0
  65. llama_stack_api/internal/sqlstore.py +81 -0
  66. llama_stack_api/models/__init__.py +47 -0
  67. llama_stack_api/models/api.py +38 -0
  68. llama_stack_api/models/fastapi_routes.py +104 -0
  69. llama_stack_api/models/models.py +157 -0
  70. llama_stack_api/openai_responses.py +1494 -0
  71. llama_stack_api/post_training/__init__.py +73 -0
  72. llama_stack_api/post_training/api.py +36 -0
  73. llama_stack_api/post_training/fastapi_routes.py +116 -0
  74. llama_stack_api/post_training/models.py +339 -0
  75. llama_stack_api/prompts/__init__.py +47 -0
  76. llama_stack_api/prompts/api.py +44 -0
  77. llama_stack_api/prompts/fastapi_routes.py +163 -0
  78. llama_stack_api/prompts/models.py +177 -0
  79. llama_stack_api/providers/__init__.py +33 -0
  80. llama_stack_api/providers/api.py +16 -0
  81. llama_stack_api/providers/fastapi_routes.py +57 -0
  82. llama_stack_api/providers/models.py +24 -0
  83. llama_stack_api/rag_tool.py +168 -0
  84. llama_stack_api/resource.py +36 -0
  85. llama_stack_api/router_utils.py +160 -0
  86. llama_stack_api/safety/__init__.py +37 -0
  87. llama_stack_api/safety/api.py +29 -0
  88. llama_stack_api/safety/datatypes.py +83 -0
  89. llama_stack_api/safety/fastapi_routes.py +55 -0
  90. llama_stack_api/safety/models.py +38 -0
  91. llama_stack_api/schema_utils.py +251 -0
  92. llama_stack_api/scoring/__init__.py +66 -0
  93. llama_stack_api/scoring/api.py +35 -0
  94. llama_stack_api/scoring/fastapi_routes.py +67 -0
  95. llama_stack_api/scoring/models.py +81 -0
  96. llama_stack_api/scoring_functions/__init__.py +50 -0
  97. llama_stack_api/scoring_functions/api.py +39 -0
  98. llama_stack_api/scoring_functions/fastapi_routes.py +108 -0
  99. llama_stack_api/scoring_functions/models.py +214 -0
  100. llama_stack_api/shields/__init__.py +41 -0
  101. llama_stack_api/shields/api.py +39 -0
  102. llama_stack_api/shields/fastapi_routes.py +104 -0
  103. llama_stack_api/shields/models.py +74 -0
  104. llama_stack_api/tools.py +226 -0
  105. llama_stack_api/validators.py +46 -0
  106. llama_stack_api/vector_io/__init__.py +88 -0
  107. llama_stack_api/vector_io/api.py +234 -0
  108. llama_stack_api/vector_io/fastapi_routes.py +447 -0
  109. llama_stack_api/vector_io/models.py +663 -0
  110. llama_stack_api/vector_stores.py +53 -0
  111. llama_stack_api/version.py +9 -0
  112. {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.5.0rc1.dist-info}/METADATA +1 -1
  113. llama_stack_api-0.5.0rc1.dist-info/RECORD +115 -0
  114. llama_stack_api-0.5.0rc1.dist-info/top_level.txt +1 -0
  115. llama_stack_api-0.4.3.dist-info/RECORD +0 -4
  116. llama_stack_api-0.4.3.dist-info/top_level.txt +0 -1
  117. {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.5.0rc1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,77 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from enum import Enum
8
+ from typing import Any
9
+
10
+ from pydantic import BaseModel
11
+
12
+ from llama_stack_api.schema_utils import json_schema_type
13
+
14
+
15
class Order(Enum):
    """Sort direction accepted by paginated list endpoints.

    :cvar asc: Ascending order
    :cvar desc: Descending order
    """

    asc = "asc"
    desc = "desc"
23
+
24
+
25
@json_schema_type
class PaginatedResponse(BaseModel):
    """A generic paginated response that follows a simple format.

    :param data: The list of items for the current page
    :param has_more: Whether there are more items available after this set
    :param url: The URL for accessing this list
    """

    # Items are opaque JSON objects; each endpoint defines its own item schema.
    data: list[dict[str, Any]]
    # True when the server holds more items beyond this page.
    has_more: bool
    # NOTE(review): presumably the request URL that produced this page — confirm with callers.
    url: str | None = None
37
+
38
+
39
+ # This is a short term solution to allow inference API to return metrics
40
+ # The ideal way to do this is to have a way for all response types to include metrics
41
+ # and all metric events logged to the telemetry API to be included with the response
42
+ # To do this, we will need to augment all response types with a metrics field.
43
+ # We have hit a blocker from stainless SDK that prevents us from doing this.
44
+ # The blocker is that if we were to augment the response types that have a data field
45
+ # in them like so
46
+ # class ListModelsResponse(BaseModel):
47
+ # metrics: Optional[List[MetricEvent]] = None
48
+ # data: List[Models]
49
+ # ...
50
+ # The client SDK will need to access the data by using a .data field, which is not
51
+ # ergonomic. Stainless SDK does support unwrapping the response type, but it
52
+ # requires that the response type have only a single field.
53
+
54
+ # We will need a way in the client SDK to signal that the metrics are needed
55
+ # and if they are needed, the client SDK has to return the full response type
56
+ # without unwrapping it.
57
+
58
+
59
@json_schema_type
class MetricInResponse(BaseModel):
    """A metric value included in API responses.

    :param metric: The name of the metric
    :param value: The numeric value of the metric
    :param unit: (Optional) The unit of measurement for the metric value
    """

    metric: str
    value: int | float
    unit: str | None = None
70
+
71
+
72
class MetricResponseMixin(BaseModel):
    """Mixin for API response models that may carry inference metrics.

    See the module-level comment above for why metrics are attached this way
    rather than being part of every response type.

    :param metrics: (Optional) List of metrics associated with the API response
    """

    metrics: list[MetricInResponse] | None = None
@@ -0,0 +1,47 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from datetime import datetime
8
+
9
+ from pydantic import BaseModel
10
+
11
+ from llama_stack_api.schema_utils import json_schema_type
12
+
13
+
14
@json_schema_type
class PostTrainingMetric(BaseModel):
    """Training metrics captured during post-training jobs.

    :param epoch: Training epoch number
    :param train_loss: Loss value on the training dataset
    :param validation_loss: Loss value on the validation dataset
    :param perplexity: Perplexity metric indicating model confidence
    """

    # NOTE(review): epoch indexing (0- vs 1-based) is backend-defined — not fixed here.
    epoch: int
    train_loss: float
    validation_loss: float
    perplexity: float
28
+
29
+
30
@json_schema_type
class Checkpoint(BaseModel):
    """Checkpoint created during training runs.

    :param identifier: Unique identifier for the checkpoint
    :param created_at: Timestamp when the checkpoint was created
    :param epoch: Training epoch when the checkpoint was saved
    :param post_training_job_id: Identifier of the training job that created this checkpoint
    :param path: File system path where the checkpoint is stored
    :param training_metrics: (Optional) Training metrics associated with this checkpoint
    """

    identifier: str
    # NOTE(review): presumably timezone-aware UTC — confirm with producers.
    created_at: datetime
    epoch: int
    post_training_job_id: str
    path: str
    training_metrics: PostTrainingMetric | None = None
@@ -0,0 +1,146 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from typing import Annotated, Literal
8
+
9
+ from pydantic import BaseModel, Field
10
+
11
+ from llama_stack_api.schema_utils import json_schema_type, register_schema
12
+
13
+
14
@json_schema_type
class StringType(BaseModel):
    """Parameter type for string values.

    :param type: Discriminator type. Always "string"
    """

    # Fixed tag used as the discriminator in the ParamType tagged union.
    type: Literal["string"] = "string"
22
+
23
+
24
@json_schema_type
class NumberType(BaseModel):
    """Parameter type for numeric values.

    :param type: Discriminator type. Always "number"
    """

    # Fixed tag used as the discriminator in the ParamType tagged union.
    type: Literal["number"] = "number"
32
+
33
+
34
@json_schema_type
class BooleanType(BaseModel):
    """Parameter type for boolean values.

    :param type: Discriminator type. Always "boolean"
    """

    # Fixed tag used as the discriminator in the ParamType tagged union.
    type: Literal["boolean"] = "boolean"
42
+
43
+
44
@json_schema_type
class ArrayType(BaseModel):
    """Parameter type for array values.

    Element types are intentionally not modeled — see the commented-out
    recursive-definition note at the bottom of this module.

    :param type: Discriminator type. Always "array"
    """

    # Fixed tag used as the discriminator in the ParamType tagged union.
    type: Literal["array"] = "array"
52
+
53
+
54
@json_schema_type
class ObjectType(BaseModel):
    """Parameter type for object values.

    Property types are intentionally not modeled — see the commented-out
    recursive-definition note at the bottom of this module.

    :param type: Discriminator type. Always "object"
    """

    # Fixed tag used as the discriminator in the ParamType tagged union.
    type: Literal["object"] = "object"
62
+
63
+
64
@json_schema_type
class JsonType(BaseModel):
    """Parameter type for JSON values.

    :param type: Discriminator type. Always "json"
    """

    # Fixed tag used as the discriminator in the ParamType tagged union.
    type: Literal["json"] = "json"
72
+
73
+
74
@json_schema_type
class UnionType(BaseModel):
    """Parameter type for union values.

    Member types are intentionally not modeled — see the commented-out
    recursive-definition note at the bottom of this module.

    :param type: Discriminator type. Always "union"
    """

    # Fixed tag used as the discriminator in the ParamType tagged union.
    type: Literal["union"] = "union"
82
+
83
+
84
@json_schema_type
class ChatCompletionInputType(BaseModel):
    """Parameter type for chat completion input.

    :param type: Discriminator type. Always "chat_completion_input"
    """

    # expects List[Message] for messages
    type: Literal["chat_completion_input"] = "chat_completion_input"
93
+
94
+
95
@json_schema_type
class CompletionInputType(BaseModel):
    """Parameter type for completion input.

    :param type: Discriminator type. Always "completion_input"
    """

    # expects InterleavedTextMedia for content
    type: Literal["completion_input"] = "completion_input"
104
+
105
+
106
@json_schema_type
class DialogType(BaseModel):
    """Parameter type for dialog data with semantic output labels.

    :param type: Discriminator type. Always "dialog"
    """

    # expects List[Message] for messages
    # this type semantically contains the output label whereas ChatCompletionInputType does not
    type: Literal["dialog"] = "dialog"
116
+
117
+
118
# Tagged union of all parameter types, discriminated by each member's `type` field.
# NOTE(review): DialogType is defined above but absent from this union —
# confirm whether that omission is intentional before adding it.
ParamType = Annotated[
    StringType
    | NumberType
    | BooleanType
    | ArrayType
    | ObjectType
    | JsonType
    | UnionType
    | ChatCompletionInputType
    | CompletionInputType,
    Field(discriminator="type"),
]
register_schema(ParamType, name="ParamType")
131
+
132
+ """
133
+ # TODO: recursive definition of ParamType in these containers
134
+ # will cause infinite recursion in OpenAPI generation script
135
+ # since we are going with ChatCompletionInputType and CompletionInputType
136
+ # we don't need to worry about ArrayType/ObjectType/UnionType for now
137
+ ArrayType.model_rebuild()
138
+ ObjectType.model_rebuild()
139
+ UnionType.model_rebuild()
140
+
141
+
142
+ class CustomType(BaseModel):
143
+ pylint: disable=syntax-error
144
+ type: Literal["custom"] = "custom"
145
+ validator_class: str
146
+ """
@@ -0,0 +1,38 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ """Connectors API package.
8
+
9
+ This package contains the Connectors API definition, models, and FastAPI router.
10
+ """
11
+
12
+ from . import fastapi_routes
13
+ from .api import Connectors
14
+ from .models import (
15
+ CommonConnectorFields,
16
+ Connector,
17
+ ConnectorInput,
18
+ ConnectorType,
19
+ GetConnectorRequest,
20
+ GetConnectorToolRequest,
21
+ ListConnectorsResponse,
22
+ ListConnectorToolsRequest,
23
+ ListToolsResponse,
24
+ )
25
+
26
+ __all__ = [
27
+ "Connectors",
28
+ "CommonConnectorFields",
29
+ "Connector",
30
+ "ConnectorInput",
31
+ "ConnectorType",
32
+ "GetConnectorRequest",
33
+ "GetConnectorToolRequest",
34
+ "ListConnectorsResponse",
35
+ "ListConnectorToolsRequest",
36
+ "ListToolsResponse",
37
+ "fastapi_routes",
38
+ ]
@@ -0,0 +1,50 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ """Connectors API protocol definition.
8
+
9
+ This module contains the Connectors protocol definition.
10
+ Pydantic models are defined in llama_stack_api.connectors.models.
11
+ The FastAPI router is defined in llama_stack_api.connectors.fastapi_routes.
12
+ """
13
+
14
+ from typing import Protocol, runtime_checkable
15
+
16
+ from llama_stack_api.tools import ToolDef
17
+
18
+ from .models import (
19
+ Connector,
20
+ GetConnectorRequest,
21
+ GetConnectorToolRequest,
22
+ ListConnectorsResponse,
23
+ ListConnectorToolsRequest,
24
+ ListToolsResponse,
25
+ )
26
+
27
+
28
@runtime_checkable
class Connectors(Protocol):
    """Protocol for connector management operations."""

    # Resolve a single connector by ID (carried in the request model).
    # NOTE(review): `authorization` is presumably a bearer token forwarded to the
    # backing server — confirm the semantics with implementations.
    async def get_connector(
        self,
        request: GetConnectorRequest,
        authorization: str | None = None,
    ) -> Connector: ...

    # Enumerate all configured connectors.
    async def list_connectors(self) -> ListConnectorsResponse: ...

    # List the tools exposed by one connector.
    async def list_connector_tools(
        self,
        request: ListConnectorToolsRequest,
        authorization: str | None = None,
    ) -> ListToolsResponse: ...

    # Fetch a single tool definition from a connector by tool name.
    async def get_connector_tool(
        self,
        request: GetConnectorToolRequest,
        authorization: str | None = None,
    ) -> ToolDef: ...
@@ -0,0 +1,103 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ """FastAPI router for the Connectors API.
8
+
9
+ This module defines the FastAPI router for the Connectors API using standard
10
+ FastAPI route decorators.
11
+ """
12
+
13
+ from typing import Annotated
14
+
15
+ from fastapi import APIRouter, Depends, Path, Query
16
+
17
+ from llama_stack_api.router_utils import create_path_dependency, standard_responses
18
+ from llama_stack_api.tools import ToolDef
19
+ from llama_stack_api.version import LLAMA_STACK_API_V1ALPHA
20
+
21
+ from .api import Connectors
22
+ from .models import (
23
+ Connector,
24
+ GetConnectorRequest,
25
+ GetConnectorToolRequest,
26
+ ListConnectorsResponse,
27
+ ListConnectorToolsRequest,
28
+ ListToolsResponse,
29
+ )
30
+
31
# Path parameter dependencies for single-field request models.
# NOTE(review): create_path_dependency presumably builds a FastAPI dependency
# that reads the model's single field from the matching path parameter and
# returns the populated model — see llama_stack_api.router_utils to confirm.
get_connector_request = create_path_dependency(GetConnectorRequest)
list_connector_tools_request = create_path_dependency(ListConnectorToolsRequest)
34
+
35
+
36
def create_router(impl: Connectors) -> APIRouter:
    """Create a FastAPI router for the Connectors API.

    Args:
        impl: The Connectors implementation instance

    Returns:
        APIRouter configured for the Connectors API
    """
    router = APIRouter(
        prefix=f"/{LLAMA_STACK_API_V1ALPHA}",
        tags=["Connectors"],
        responses=standard_responses,
    )

    @router.get(
        "/connectors",
        response_model=ListConnectorsResponse,
        summary="List all connectors.",
        description="List all configured connectors.",
    )
    async def list_connectors() -> ListConnectorsResponse:
        return await impl.list_connectors()

    # NOTE: Route order matters! More specific routes must come before less specific ones.
    # /connectors/{connector_id}/tools/{tool_name} must come before /connectors/{connector_id}/tools
    # /connectors/{connector_id}/tools must come before /connectors/{connector_id}

    # NOTE(review): the three routes below accept `authorization` as a query
    # parameter; query strings commonly end up in access logs and proxies —
    # confirm whether an Authorization header would be more appropriate.
    @router.get(
        "/connectors/{connector_id}/tools/{tool_name}",
        response_model=ToolDef,
        summary="Get a tool by name from a connector.",
        description="Get a tool definition by its name from a connector.",
    )
    async def get_connector_tool(
        connector_id: Annotated[str, Path(description="Identifier for the connector")],
        tool_name: Annotated[str, Path(description="Name of the tool")],
        authorization: Annotated[str | None, Query(description="Authorization token")] = None,
    ) -> ToolDef:
        # GetConnectorToolRequest has 2 path params, so we construct it manually
        # (create_path_dependency only handles single-field request models).
        request = GetConnectorToolRequest(connector_id=connector_id, tool_name=tool_name)
        return await impl.get_connector_tool(request, authorization=authorization)

    @router.get(
        "/connectors/{connector_id}/tools",
        response_model=ListToolsResponse,
        summary="List tools from a connector.",
        description="List all tools available from a connector.",
    )
    async def list_connector_tools(
        request: Annotated[ListConnectorToolsRequest, Depends(list_connector_tools_request)],
        authorization: Annotated[str | None, Query(description="Authorization token")] = None,
    ) -> ListToolsResponse:
        return await impl.list_connector_tools(request, authorization=authorization)

    @router.get(
        "/connectors/{connector_id}",
        response_model=Connector,
        summary="Get a connector by its ID.",
        description="Get a connector by its ID.",
    )
    async def get_connector(
        request: Annotated[GetConnectorRequest, Depends(get_connector_request)],
        authorization: Annotated[str | None, Query(description="Authorization token")] = None,
    ) -> Connector:
        return await impl.get_connector(request, authorization=authorization)

    return router
@@ -0,0 +1,103 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ """Connectors API models.
8
+
9
+ This module contains the Pydantic models for the Connectors API.
10
+ """
11
+
12
+ from enum import StrEnum
13
+
14
+ from pydantic import BaseModel, Field
15
+
16
+ from llama_stack_api.schema_utils import json_schema_type
17
+ from llama_stack_api.tools import ToolDef
18
+
19
+
20
@json_schema_type
class ConnectorType(StrEnum):
    """Type of connector."""

    # The only connector type defined today.
    MCP = "mcp"
25
+
26
+
27
class CommonConnectorFields(BaseModel):
    """Common fields shared by all connector models.

    :param connector_type: Kind of connector; defaults to MCP (the only defined type).
    :param connector_id: Identifier for the connector.
    :param url: URL of the connector.
    :param server_label: (Optional) Label of the server.
    """

    connector_type: ConnectorType = Field(default=ConnectorType.MCP)
    connector_id: str = Field(..., description="Identifier for the connector")
    url: str = Field(..., description="URL of the connector")
    server_label: str | None = Field(default=None, description="Label of the server")
34
+
35
+
36
@json_schema_type
class Connector(CommonConnectorFields):
    """A connector registered in Llama Stack."""

    # NOTE(review): no field aliases are declared on this model, so
    # populate_by_name currently has no observable effect — presumably kept for
    # client compatibility; confirm before removing.
    model_config = {"populate_by_name": True}
    server_name: str | None = Field(default=None, description="Name of the server")
    server_description: str | None = Field(default=None, description="Description of the server")
    server_version: str | None = Field(default=None, description="Version of the server")
44
+
45
+
46
@json_schema_type
class ConnectorInput(CommonConnectorFields):
    """Input for creating a connector; carries only the common fields."""
49
+
50
+
51
+ # Path parameter models (single field for create_path_dependency)
52
+
53
+
54
@json_schema_type
class GetConnectorRequest(BaseModel):
    """Request model for getting a connector by ID.

    Single-field by design so it can be bound from a path parameter
    via create_path_dependency in fastapi_routes.
    """

    connector_id: str = Field(..., description="Identifier for the connector")
59
+
60
+
61
@json_schema_type
class ListConnectorToolsRequest(BaseModel):
    """Request model for listing tools from a connector.

    Single-field by design so it can be bound from a path parameter
    via create_path_dependency in fastapi_routes.
    """

    connector_id: str = Field(..., description="Identifier for the connector")
66
+
67
+
68
@json_schema_type
class GetConnectorToolRequest(BaseModel):
    """Request model for getting a tool from a connector.

    Has two path-derived fields, so the router constructs it manually rather
    than through create_path_dependency.
    """

    connector_id: str = Field(..., description="Identifier for the connector")
    tool_name: str = Field(..., description="Name of the tool")
74
+
75
+
76
+ # Response models
77
+
78
+
79
@json_schema_type
class ListConnectorsResponse(BaseModel):
    """Response containing a list of configured connectors.

    :param data: The connectors known to the stack.
    """

    data: list[Connector]
84
+
85
+
86
@json_schema_type
class ListToolsResponse(BaseModel):
    """Response containing a list of tools.

    :param data: Tool definitions exposed by a connector.
    """

    data: list[ToolDef]
91
+
92
+
93
+ __all__ = [
94
+ "ConnectorType",
95
+ "CommonConnectorFields",
96
+ "Connector",
97
+ "ConnectorInput",
98
+ "GetConnectorRequest",
99
+ "ListConnectorsResponse",
100
+ "ListConnectorToolsRequest",
101
+ "ListToolsResponse",
102
+ "GetConnectorToolRequest",
103
+ ]
@@ -0,0 +1,61 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ """Conversations API protocol and models.
8
+
9
+ This module contains the Conversations protocol definition.
10
+ Pydantic models are defined in llama_stack_api.conversations.models.
11
+ The FastAPI router is defined in llama_stack_api.conversations.fastapi_routes.
12
+ """
13
+
14
+ # Import fastapi_routes for router factory access
15
+ from . import fastapi_routes
16
+
17
+ # Import protocol for re-export
18
+ from .api import Conversations
19
+
20
+ # Import models for re-export
21
+ from .models import (
22
+ AddItemsRequest,
23
+ Conversation,
24
+ ConversationDeletedResource,
25
+ ConversationItem,
26
+ ConversationItemCreateRequest,
27
+ ConversationItemDeletedResource,
28
+ ConversationItemInclude,
29
+ ConversationItemList,
30
+ ConversationMessage,
31
+ CreateConversationRequest,
32
+ DeleteConversationRequest,
33
+ DeleteItemRequest,
34
+ GetConversationRequest,
35
+ ListItemsRequest,
36
+ Metadata,
37
+ RetrieveItemRequest,
38
+ UpdateConversationRequest,
39
+ )
40
+
41
+ __all__ = [
42
+ "Conversations",
43
+ "Conversation",
44
+ "ConversationMessage",
45
+ "ConversationItem",
46
+ "ConversationDeletedResource",
47
+ "ConversationItemCreateRequest",
48
+ "ConversationItemInclude",
49
+ "ConversationItemList",
50
+ "ConversationItemDeletedResource",
51
+ "Metadata",
52
+ "CreateConversationRequest",
53
+ "GetConversationRequest",
54
+ "UpdateConversationRequest",
55
+ "DeleteConversationRequest",
56
+ "AddItemsRequest",
57
+ "RetrieveItemRequest",
58
+ "ListItemsRequest",
59
+ "DeleteItemRequest",
60
+ "fastapi_routes",
61
+ ]
@@ -0,0 +1,44 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from typing import Protocol, runtime_checkable
8
+
9
+ from .models import (
10
+ AddItemsRequest,
11
+ Conversation,
12
+ ConversationDeletedResource,
13
+ ConversationItem,
14
+ ConversationItemDeletedResource,
15
+ ConversationItemList,
16
+ CreateConversationRequest,
17
+ DeleteConversationRequest,
18
+ DeleteItemRequest,
19
+ GetConversationRequest,
20
+ ListItemsRequest,
21
+ RetrieveItemRequest,
22
+ UpdateConversationRequest,
23
+ )
24
+
25
+
26
@runtime_checkable
class Conversations(Protocol):
    """Protocol for conversation management operations."""

    # Create a new conversation.
    async def create_conversation(self, request: CreateConversationRequest) -> Conversation: ...

    # Fetch an existing conversation (ID is carried inside the request model).
    async def get_conversation(self, request: GetConversationRequest) -> Conversation: ...

    # NOTE(review): update_conversation and add_items take conversation_id as a
    # separate argument while the other methods embed the ID in the request
    # model — confirm whether this asymmetry is intentional.
    async def update_conversation(self, conversation_id: str, request: UpdateConversationRequest) -> Conversation: ...

    # Delete a conversation. The "openai_" prefix presumably mirrors the
    # OpenAI-compatible endpoint naming — confirm against the route definitions.
    async def openai_delete_conversation(self, request: DeleteConversationRequest) -> ConversationDeletedResource: ...

    # Append items to an existing conversation.
    async def add_items(self, conversation_id: str, request: AddItemsRequest) -> ConversationItemList: ...

    # Retrieve a single conversation item.
    async def retrieve(self, request: RetrieveItemRequest) -> ConversationItem: ...

    # List items in a conversation.
    async def list_items(self, request: ListItemsRequest) -> ConversationItemList: ...

    # Delete one item from a conversation.
    async def openai_delete_conversation_item(self, request: DeleteItemRequest) -> ConversationItemDeletedResource: ...