llama-stack-api 0.4.4__py3-none-any.whl → 0.5.0rc1__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to their respective public registries. It is provided for informational purposes only.
Files changed (79)
  1. llama_stack_api/__init__.py +175 -20
  2. llama_stack_api/agents/__init__.py +38 -0
  3. llama_stack_api/agents/api.py +52 -0
  4. llama_stack_api/agents/fastapi_routes.py +268 -0
  5. llama_stack_api/agents/models.py +181 -0
  6. llama_stack_api/common/errors.py +15 -0
  7. llama_stack_api/connectors/__init__.py +38 -0
  8. llama_stack_api/connectors/api.py +50 -0
  9. llama_stack_api/connectors/fastapi_routes.py +103 -0
  10. llama_stack_api/connectors/models.py +103 -0
  11. llama_stack_api/conversations/__init__.py +61 -0
  12. llama_stack_api/conversations/api.py +44 -0
  13. llama_stack_api/conversations/fastapi_routes.py +177 -0
  14. llama_stack_api/conversations/models.py +245 -0
  15. llama_stack_api/datasetio/__init__.py +34 -0
  16. llama_stack_api/datasetio/api.py +42 -0
  17. llama_stack_api/datasetio/fastapi_routes.py +94 -0
  18. llama_stack_api/datasetio/models.py +48 -0
  19. llama_stack_api/eval/__init__.py +55 -0
  20. llama_stack_api/eval/api.py +51 -0
  21. llama_stack_api/eval/compat.py +300 -0
  22. llama_stack_api/eval/fastapi_routes.py +126 -0
  23. llama_stack_api/eval/models.py +141 -0
  24. llama_stack_api/inference/__init__.py +207 -0
  25. llama_stack_api/inference/api.py +93 -0
  26. llama_stack_api/inference/fastapi_routes.py +243 -0
  27. llama_stack_api/inference/models.py +1035 -0
  28. llama_stack_api/models/__init__.py +47 -0
  29. llama_stack_api/models/api.py +38 -0
  30. llama_stack_api/models/fastapi_routes.py +104 -0
  31. llama_stack_api/{models.py → models/models.py} +65 -79
  32. llama_stack_api/openai_responses.py +32 -6
  33. llama_stack_api/post_training/__init__.py +73 -0
  34. llama_stack_api/post_training/api.py +36 -0
  35. llama_stack_api/post_training/fastapi_routes.py +116 -0
  36. llama_stack_api/{post_training.py → post_training/models.py} +55 -86
  37. llama_stack_api/prompts/__init__.py +47 -0
  38. llama_stack_api/prompts/api.py +44 -0
  39. llama_stack_api/prompts/fastapi_routes.py +163 -0
  40. llama_stack_api/prompts/models.py +177 -0
  41. llama_stack_api/resource.py +0 -1
  42. llama_stack_api/safety/__init__.py +37 -0
  43. llama_stack_api/safety/api.py +29 -0
  44. llama_stack_api/safety/datatypes.py +83 -0
  45. llama_stack_api/safety/fastapi_routes.py +55 -0
  46. llama_stack_api/safety/models.py +38 -0
  47. llama_stack_api/schema_utils.py +47 -4
  48. llama_stack_api/scoring/__init__.py +66 -0
  49. llama_stack_api/scoring/api.py +35 -0
  50. llama_stack_api/scoring/fastapi_routes.py +67 -0
  51. llama_stack_api/scoring/models.py +81 -0
  52. llama_stack_api/scoring_functions/__init__.py +50 -0
  53. llama_stack_api/scoring_functions/api.py +39 -0
  54. llama_stack_api/scoring_functions/fastapi_routes.py +108 -0
  55. llama_stack_api/{scoring_functions.py → scoring_functions/models.py} +67 -64
  56. llama_stack_api/shields/__init__.py +41 -0
  57. llama_stack_api/shields/api.py +39 -0
  58. llama_stack_api/shields/fastapi_routes.py +104 -0
  59. llama_stack_api/shields/models.py +74 -0
  60. llama_stack_api/validators.py +46 -0
  61. llama_stack_api/vector_io/__init__.py +88 -0
  62. llama_stack_api/vector_io/api.py +234 -0
  63. llama_stack_api/vector_io/fastapi_routes.py +447 -0
  64. llama_stack_api/{vector_io.py → vector_io/models.py} +99 -377
  65. {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/METADATA +1 -1
  66. llama_stack_api-0.5.0rc1.dist-info/RECORD +115 -0
  67. llama_stack_api/agents.py +0 -173
  68. llama_stack_api/connectors.py +0 -146
  69. llama_stack_api/conversations.py +0 -270
  70. llama_stack_api/datasetio.py +0 -55
  71. llama_stack_api/eval.py +0 -137
  72. llama_stack_api/inference.py +0 -1169
  73. llama_stack_api/prompts.py +0 -203
  74. llama_stack_api/safety.py +0 -132
  75. llama_stack_api/scoring.py +0 -93
  76. llama_stack_api/shields.py +0 -93
  77. llama_stack_api-0.4.4.dist-info/RECORD +0 -70
  78. {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/WHEEL +0 -0
  79. {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/top_level.txt +0 -0
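The headline change is structural: each flat API module (agents.py, connectors.py, conversations.py, datasetio.py, and so on) becomes a subpackage split into api.py, models.py, and fastapi_routes.py. A minimal sketch of what this likely means for consumers, assuming the new `__init__.py` files re-export the old public names (their non-zero added line counts suggest re-export shims, but the diff bodies below do not confirm this):

```python
# Hedged sketch: these imports worked against the flat 0.4.4 modules and
# should keep working in 0.5.0rc1 *if* each new subpackage __init__.py
# re-exports its protocol class (an assumption, not shown in this diff).
from llama_stack_api.agents import Agents                 # was llama_stack_api/agents.py
from llama_stack_api.connectors import Connectors         # was llama_stack_api/connectors.py
from llama_stack_api.conversations import Conversations   # was llama_stack_api/conversations.py
from llama_stack_api.datasetio import DatasetIO           # was llama_stack_api/datasetio.py
```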
llama_stack_api/agents.py DELETED
@@ -1,173 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from collections.abc import AsyncIterator
-from enum import StrEnum
-from typing import Annotated, Protocol, runtime_checkable
-
-from pydantic import BaseModel
-
-from llama_stack_api.common.responses import Order
-from llama_stack_api.schema_utils import ExtraBodyField, json_schema_type, webmethod
-from llama_stack_api.version import LLAMA_STACK_API_V1
-
-from .openai_responses import (
-    ListOpenAIResponseInputItem,
-    ListOpenAIResponseObject,
-    OpenAIDeleteResponseObject,
-    OpenAIResponseInput,
-    OpenAIResponseInputTool,
-    OpenAIResponseInputToolChoice,
-    OpenAIResponseObject,
-    OpenAIResponseObjectStream,
-    OpenAIResponsePrompt,
-    OpenAIResponseText,
-)
-
-
-@json_schema_type
-class ResponseGuardrailSpec(BaseModel):
-    """Specification for a guardrail to apply during response generation.
-
-    :param type: The type/identifier of the guardrail.
-    """
-
-    type: str
-    # TODO: more fields to be added for guardrail configuration
-
-
-ResponseGuardrail = str | ResponseGuardrailSpec
-
-
-class ResponseItemInclude(StrEnum):
-    """
-    Specify additional output data to include in the model response.
-    """
-
-    web_search_call_action_sources = "web_search_call.action.sources"
-    code_interpreter_call_outputs = "code_interpreter_call.outputs"
-    computer_call_output_output_image_url = "computer_call_output.output.image_url"
-    file_search_call_results = "file_search_call.results"
-    message_input_image_image_url = "message.input_image.image_url"
-    message_output_text_logprobs = "message.output_text.logprobs"
-    reasoning_encrypted_content = "reasoning.encrypted_content"
-
-
-@runtime_checkable
-class Agents(Protocol):
-    """Agents
-
-    APIs for creating and interacting with agentic systems."""
-
-    # We situate the OpenAI Responses API in the Agents API just like we did things
-    # for Inference. The Responses API, in its intent, serves the same purpose as
-    # the Agents API above -- it is essentially a lightweight "agentic loop" with
-    # integrated tool calling.
-    #
-    # Both of these APIs are inherently stateful.
-
-    @webmethod(route="/responses/{response_id}", method="GET", level=LLAMA_STACK_API_V1)
-    async def get_openai_response(
-        self,
-        response_id: str,
-    ) -> OpenAIResponseObject:
-        """Get a model response.
-
-        :param response_id: The ID of the OpenAI response to retrieve.
-        :returns: An OpenAIResponseObject.
-        """
-        ...
-
-    @webmethod(route="/responses", method="POST", level=LLAMA_STACK_API_V1)
-    async def create_openai_response(
-        self,
-        input: str | list[OpenAIResponseInput],
-        model: str,
-        prompt: OpenAIResponsePrompt | None = None,
-        instructions: str | None = None,
-        parallel_tool_calls: bool | None = True,
-        previous_response_id: str | None = None,
-        conversation: str | None = None,
-        store: bool | None = True,
-        stream: bool | None = False,
-        temperature: float | None = None,
-        text: OpenAIResponseText | None = None,
-        tool_choice: OpenAIResponseInputToolChoice | None = None,
-        tools: list[OpenAIResponseInputTool] | None = None,
-        include: list[ResponseItemInclude] | None = None,
-        max_infer_iters: int | None = 10,  # this is an extension to the OpenAI API
-        guardrails: Annotated[
-            list[ResponseGuardrail] | None,
-            ExtraBodyField(
-                "List of guardrails to apply during response generation. Guardrails provide safety and content moderation."
-            ),
-        ] = None,
-        max_tool_calls: int | None = None,
-        metadata: dict[str, str] | None = None,
-    ) -> OpenAIResponseObject | AsyncIterator[OpenAIResponseObjectStream]:
-        """Create a model response.
-
-        :param input: Input message(s) to create the response.
-        :param model: The underlying LLM used for completions.
-        :param prompt: (Optional) Prompt object with ID, version, and variables.
-        :param previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses.
-        :param conversation: (Optional) The ID of a conversation to add the response to. Must begin with 'conv_'. Input and output messages will be automatically added to the conversation.
-        :param include: (Optional) Additional fields to include in the response.
-        :param guardrails: (Optional) List of guardrails to apply during response generation. Can be guardrail IDs (strings) or guardrail specifications.
-        :param max_tool_calls: (Optional) Max number of total calls to built-in tools that can be processed in a response.
-        :param metadata: (Optional) Dictionary of metadata key-value pairs to attach to the response.
-        :returns: An OpenAIResponseObject.
-        """
-        ...
-
-    @webmethod(route="/responses", method="GET", level=LLAMA_STACK_API_V1)
-    async def list_openai_responses(
-        self,
-        after: str | None = None,
-        limit: int | None = 50,
-        model: str | None = None,
-        order: Order | None = Order.desc,
-    ) -> ListOpenAIResponseObject:
-        """List all responses.
-
-        :param after: The ID of the last response to return.
-        :param limit: The number of responses to return.
-        :param model: The model to filter responses by.
-        :param order: The order to sort responses by when sorted by created_at ('asc' or 'desc').
-        :returns: A ListOpenAIResponseObject.
-        """
-        ...
-
-    @webmethod(route="/responses/{response_id}/input_items", method="GET", level=LLAMA_STACK_API_V1)
-    async def list_openai_response_input_items(
-        self,
-        response_id: str,
-        after: str | None = None,
-        before: str | None = None,
-        include: list[str] | None = None,
-        limit: int | None = 20,
-        order: Order | None = Order.desc,
-    ) -> ListOpenAIResponseInputItem:
-        """List input items.
-
-        :param response_id: The ID of the response to retrieve input items for.
-        :param after: An item ID to list items after, used for pagination.
-        :param before: An item ID to list items before, used for pagination.
-        :param include: Additional fields to include in the response.
-        :param limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
-        :param order: The order to return the input items in. Default is desc.
-        :returns: A ListOpenAIResponseInputItem.
-        """
-        ...
-
-    @webmethod(route="/responses/{response_id}", method="DELETE", level=LLAMA_STACK_API_V1)
-    async def delete_openai_response(self, response_id: str) -> OpenAIDeleteResponseObject:
-        """Delete a response.
-
-        :param response_id: The ID of the OpenAI response to delete.
-        :returns: An OpenAIDeleteResponseObject.
-        """
-        ...
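Since `Agents` is declared `@runtime_checkable`, conformance is structural rather than nominal. A minimal sketch, assuming the class is still importable from the new `llama_stack_api.agents` subpackage; the stub below is hypothetical and only demonstrates the isinstance() mechanics:

```python
# Sketch: @runtime_checkable protocols check attribute presence only, so a
# stub exposing all five methods passes isinstance() without inheriting Agents.
from llama_stack_api.agents import Agents  # assumed re-export in 0.5.0rc1

class StubAgents:
    async def get_openai_response(self, response_id): ...
    async def create_openai_response(self, input, model, **kwargs): ...
    async def list_openai_responses(self, after=None, limit=50, model=None, order=None): ...
    async def list_openai_response_input_items(self, response_id, **kwargs): ...
    async def delete_openai_response(self, response_id): ...

assert isinstance(StubAgents(), Agents)  # structural match; signatures are not verified
```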
llama_stack_api/connectors.py DELETED
@@ -1,146 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from enum import StrEnum
-from typing import Literal, Protocol
-
-from pydantic import BaseModel, Field
-from typing_extensions import runtime_checkable
-
-from llama_stack_api.resource import Resource, ResourceType
-from llama_stack_api.schema_utils import json_schema_type, webmethod
-from llama_stack_api.tools import ToolDef
-from llama_stack_api.version import LLAMA_STACK_API_V1ALPHA
-
-
-@json_schema_type
-class ConnectorType(StrEnum):
-    """Type of connector."""
-
-    MCP = "mcp"
-
-
-class CommonConnectorFields(BaseModel):
-    """Common fields for all connectors.
-
-    :param connector_type: Type of connector
-    :param connector_id: Identifier for the connector
-    :param url: URL of the connector
-    :param server_label: (Optional) Label of the server
-    """
-
-    connector_type: ConnectorType = Field(default=ConnectorType.MCP)
-    connector_id: str = Field(..., description="Identifier for the connector")
-    url: str = Field(..., description="URL of the connector")
-    server_label: str | None = Field(default=None, description="Label of the server")
-
-
-@json_schema_type
-class Connector(CommonConnectorFields, Resource):
-    """A connector resource representing a connector registered in Llama Stack.
-
-    :param type: Type of resource, always 'connector' for connectors
-    :param server_name: (Optional) Name of the server
-    :param server_description: (Optional) Description of the server
-    """
-
-    model_config = {"populate_by_name": True}
-    type: Literal[ResourceType.connector] = ResourceType.connector
-    server_name: str | None = Field(default=None, description="Name of the server")
-    server_description: str | None = Field(default=None, description="Description of the server")
-
-
-@json_schema_type
-class ConnectorInput(CommonConnectorFields):
-    """Input for creating a connector.
-
-    :param type: Type of resource, always 'connector' for connectors
-    """
-
-    type: Literal[ResourceType.connector] = ResourceType.connector
-
-
-@json_schema_type
-class ListConnectorsResponse(BaseModel):
-    """Response containing a list of connectors.
-
-    :param data: List of connectors
-    """
-
-    data: list[Connector]
-
-
-@json_schema_type
-class ListToolsResponse(BaseModel):
-    """Response containing a list of tools.
-
-    :param data: List of tools
-    """
-
-    data: list[ToolDef]
-
-
-@runtime_checkable
-class Connectors(Protocol):
-    # NOTE: Route order matters! More specific routes must come before less specific ones.
-    # Routes with {param:path} are greedy and will match everything including slashes.
-
-    @webmethod(route="/connectors", method="GET", level=LLAMA_STACK_API_V1ALPHA)
-    async def list_connectors(
-        self,
-    ) -> ListConnectorsResponse:
-        """List all configured connectors.
-
-        :returns: A ListConnectorsResponse.
-        """
-        ...
-
-    @webmethod(route="/connectors/{connector_id}/tools/{tool_name}", method="GET", level=LLAMA_STACK_API_V1ALPHA)
-    async def get_connector_tool(
-        self,
-        connector_id: str,
-        tool_name: str,
-        authorization: str | None = None,
-    ) -> ToolDef:
-        """Get a tool definition by its name from a connector.
-
-        :param connector_id: The ID of the connector to get the tool from.
-        :param tool_name: The name of the tool to get.
-        :param authorization: (Optional) OAuth access token for authenticating with the MCP server.
-
-        :returns: A ToolDef.
-        """
-        ...
-
-    @webmethod(route="/connectors/{connector_id}/tools", method="GET", level=LLAMA_STACK_API_V1ALPHA)
-    async def list_connector_tools(
-        self,
-        connector_id: str,
-        authorization: str | None = None,
-    ) -> ListToolsResponse:
-        """List tools available from a connector.
-
-        :param connector_id: The ID of the connector to list tools for.
-        :param authorization: (Optional) OAuth access token for authenticating with the MCP server.
-
-        :returns: A ListToolsResponse.
-        """
-        ...
-
-    @webmethod(route="/connectors/{connector_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA)
-    async def get_connector(
-        self,
-        connector_id: str,
-        authorization: str | None = None,
-    ) -> Connector:
-        """Get a connector by its ID.
-
-        :param connector_id: The ID of the connector to get.
-        :param authorization: (Optional) OAuth access token for authenticating with the MCP server.
-
-        :returns: A Connector.
-        """
-        ...
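The NOTE about route ordering in the deleted `Connectors` protocol is worth illustrating. A small FastAPI sketch (not from this package) showing why the more specific route must be registered first when a greedy `{param:path}` converter is in play:

```python
# Illustration only: in Starlette/FastAPI a `{connector_id:path}` parameter
# also matches slashes, so if the bare route were registered first it would
# swallow requests meant for ".../tools". Registering the specific route
# first avoids the shadowing the NOTE warns about.
from fastapi import FastAPI

app = FastAPI()

@app.get("/connectors/{connector_id}/tools")  # specific route first
async def list_connector_tools(connector_id: str):
    return {"data": []}

@app.get("/connectors/{connector_id:path}")  # greedy route last
async def get_connector(connector_id: str):
    return {"connector_id": connector_id}
```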
llama_stack_api/conversations.py DELETED
@@ -1,270 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from enum import StrEnum
-from typing import Annotated, Literal, Protocol, runtime_checkable
-
-from pydantic import BaseModel, Field
-
-from llama_stack_api.openai_responses import (
-    OpenAIResponseInputFunctionToolCallOutput,
-    OpenAIResponseMCPApprovalRequest,
-    OpenAIResponseMCPApprovalResponse,
-    OpenAIResponseMessage,
-    OpenAIResponseOutputMessageFileSearchToolCall,
-    OpenAIResponseOutputMessageFunctionToolCall,
-    OpenAIResponseOutputMessageMCPCall,
-    OpenAIResponseOutputMessageMCPListTools,
-    OpenAIResponseOutputMessageWebSearchToolCall,
-)
-from llama_stack_api.schema_utils import json_schema_type, register_schema, webmethod
-from llama_stack_api.version import LLAMA_STACK_API_V1
-
-Metadata = dict[str, str]
-
-
-@json_schema_type
-class Conversation(BaseModel):
-    """OpenAI-compatible conversation object."""
-
-    id: str = Field(..., description="The unique ID of the conversation.")
-    object: Literal["conversation"] = Field(
-        default="conversation", description="The object type, which is always conversation."
-    )
-    created_at: int = Field(
-        ..., description="The time at which the conversation was created, measured in seconds since the Unix epoch."
-    )
-    metadata: Metadata | None = Field(
-        default=None,
-        description="Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.",
-    )
-    items: list[dict] | None = Field(
-        default=None,
-        description="Initial items to include in the conversation context. You may add up to 20 items at a time.",
-    )
-
-
-@json_schema_type
-class ConversationMessage(BaseModel):
-    """OpenAI-compatible message item for conversations."""
-
-    id: str = Field(..., description="unique identifier for this message")
-    content: list[dict] = Field(..., description="message content")
-    role: str = Field(..., description="message role")
-    status: str = Field(..., description="message status")
-    type: Literal["message"] = "message"
-    object: Literal["message"] = "message"
-
-
-ConversationItem = Annotated[
-    OpenAIResponseMessage
-    | OpenAIResponseOutputMessageWebSearchToolCall
-    | OpenAIResponseOutputMessageFileSearchToolCall
-    | OpenAIResponseOutputMessageFunctionToolCall
-    | OpenAIResponseInputFunctionToolCallOutput
-    | OpenAIResponseMCPApprovalRequest
-    | OpenAIResponseMCPApprovalResponse
-    | OpenAIResponseOutputMessageMCPCall
-    | OpenAIResponseOutputMessageMCPListTools
-    | OpenAIResponseOutputMessageMCPCall
-    | OpenAIResponseOutputMessageMCPListTools,
-    Field(discriminator="type"),
-]
-register_schema(ConversationItem, name="ConversationItem")
-
-# Using OpenAI types directly caused issues but some notes for reference:
-# Note that ConversationItem is an Annotated Union of the types below:
-# from openai.types.responses import *
-# from openai.types.responses.response_item import *
-# from openai.types.conversations import ConversationItem
-# f = [
-#     ResponseFunctionToolCallItem,
-#     ResponseFunctionToolCallOutputItem,
-#     ResponseFileSearchToolCall,
-#     ResponseFunctionWebSearch,
-#     ImageGenerationCall,
-#     ResponseComputerToolCall,
-#     ResponseComputerToolCallOutputItem,
-#     ResponseReasoningItem,
-#     ResponseCodeInterpreterToolCall,
-#     LocalShellCall,
-#     LocalShellCallOutput,
-#     McpListTools,
-#     McpApprovalRequest,
-#     McpApprovalResponse,
-#     McpCall,
-#     ResponseCustomToolCall,
-#     ResponseCustomToolCallOutput
-# ]
-
-
-@json_schema_type
-class ConversationDeletedResource(BaseModel):
-    """Response for deleted conversation."""
-
-    id: str = Field(..., description="The deleted conversation identifier")
-    object: str = Field(default="conversation.deleted", description="Object type")
-    deleted: bool = Field(default=True, description="Whether the object was deleted")
-
-
-@json_schema_type
-class ConversationItemCreateRequest(BaseModel):
-    """Request body for creating conversation items."""
-
-    items: list[ConversationItem] = Field(
-        ...,
-        description="Items to include in the conversation context. You may add up to 20 items at a time.",
-        max_length=20,
-    )
-
-
-class ConversationItemInclude(StrEnum):
-    """
-    Specify additional output data to include in the model response.
-    """
-
-    web_search_call_action_sources = "web_search_call.action.sources"
-    code_interpreter_call_outputs = "code_interpreter_call.outputs"
-    computer_call_output_output_image_url = "computer_call_output.output.image_url"
-    file_search_call_results = "file_search_call.results"
-    message_input_image_image_url = "message.input_image.image_url"
-    message_output_text_logprobs = "message.output_text.logprobs"
-    reasoning_encrypted_content = "reasoning.encrypted_content"
-
-
-@json_schema_type
-class ConversationItemList(BaseModel):
-    """List of conversation items with pagination."""
-
-    object: str = Field(default="list", description="Object type")
-    data: list[ConversationItem] = Field(..., description="List of conversation items")
-    first_id: str | None = Field(default=None, description="The ID of the first item in the list")
-    last_id: str | None = Field(default=None, description="The ID of the last item in the list")
-    has_more: bool = Field(default=False, description="Whether there are more items available")
-
-
-@json_schema_type
-class ConversationItemDeletedResource(BaseModel):
-    """Response for deleted conversation item."""
-
-    id: str = Field(..., description="The deleted item identifier")
-    object: str = Field(default="conversation.item.deleted", description="Object type")
-    deleted: bool = Field(default=True, description="Whether the object was deleted")
-
-
-@runtime_checkable
-class Conversations(Protocol):
-    """Conversations
-
-    Protocol for conversation management operations."""
-
-    @webmethod(route="/conversations", method="POST", level=LLAMA_STACK_API_V1)
-    async def create_conversation(
-        self, items: list[ConversationItem] | None = None, metadata: Metadata | None = None
-    ) -> Conversation:
-        """Create a conversation.
-
-        Create a conversation.
-
-        :param items: Initial items to include in the conversation context.
-        :param metadata: Set of key-value pairs that can be attached to an object.
-        :returns: The created conversation object.
-        """
-        ...
-
-    @webmethod(route="/conversations/{conversation_id}", method="GET", level=LLAMA_STACK_API_V1)
-    async def get_conversation(self, conversation_id: str) -> Conversation:
-        """Retrieve a conversation.
-
-        Get a conversation with the given ID.
-
-        :param conversation_id: The conversation identifier.
-        :returns: The conversation object.
-        """
-        ...
-
-    @webmethod(route="/conversations/{conversation_id}", method="POST", level=LLAMA_STACK_API_V1)
-    async def update_conversation(self, conversation_id: str, metadata: Metadata) -> Conversation:
-        """Update a conversation.
-
-        Update a conversation's metadata with the given ID.
-
-        :param conversation_id: The conversation identifier.
-        :param metadata: Set of key-value pairs that can be attached to an object.
-        :returns: The updated conversation object.
-        """
-        ...
-
-    @webmethod(route="/conversations/{conversation_id}", method="DELETE", level=LLAMA_STACK_API_V1)
-    async def openai_delete_conversation(self, conversation_id: str) -> ConversationDeletedResource:
-        """Delete a conversation.
-
-        Delete a conversation with the given ID.
-
-        :param conversation_id: The conversation identifier.
-        :returns: The deleted conversation resource.
-        """
-        ...
-
-    @webmethod(route="/conversations/{conversation_id}/items", method="POST", level=LLAMA_STACK_API_V1)
-    async def add_items(self, conversation_id: str, items: list[ConversationItem]) -> ConversationItemList:
-        """Create items.
-
-        Create items in the conversation.
-
-        :param conversation_id: The conversation identifier.
-        :param items: Items to include in the conversation context.
-        :returns: List of created items.
-        """
-        ...
-
-    @webmethod(route="/conversations/{conversation_id}/items/{item_id}", method="GET", level=LLAMA_STACK_API_V1)
-    async def retrieve(self, conversation_id: str, item_id: str) -> ConversationItem:
-        """Retrieve an item.
-
-        Retrieve a conversation item.
-
-        :param conversation_id: The conversation identifier.
-        :param item_id: The item identifier.
-        :returns: The conversation item.
-        """
-        ...
-
-    @webmethod(route="/conversations/{conversation_id}/items", method="GET", level=LLAMA_STACK_API_V1)
-    async def list_items(
-        self,
-        conversation_id: str,
-        after: str | None = None,
-        include: list[ConversationItemInclude] | None = None,
-        limit: int | None = None,
-        order: Literal["asc", "desc"] | None = None,
-    ) -> ConversationItemList:
-        """List items.
-
-        List items in the conversation.
-
-        :param conversation_id: The conversation identifier.
-        :param after: An item ID to list items after, used in pagination.
-        :param include: Specify additional output data to include in the response.
-        :param limit: A limit on the number of objects to be returned (1-100, default 20).
-        :param order: The order to return items in (asc or desc, default desc).
-        :returns: List of conversation items.
-        """
-        ...
-
-    @webmethod(route="/conversations/{conversation_id}/items/{item_id}", method="DELETE", level=LLAMA_STACK_API_V1)
-    async def openai_delete_conversation_item(
-        self, conversation_id: str, item_id: str
-    ) -> ConversationItemDeletedResource:
-        """Delete an item.
-
-        Delete a conversation item.
-
-        :param conversation_id: The conversation identifier.
-        :param item_id: The item identifier.
-        :returns: The deleted item resource.
-        """
-        ...
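The `Conversation` model above is plain pydantic, so it can be exercised directly. A minimal sketch, assuming the model is importable from the new `llama_stack_api.conversations` subpackage; the field values are made up:

```python
# Hypothetical usage of the Conversation model shown in the deleted module.
import time

from llama_stack_api.conversations import Conversation  # assumed re-export

conv = Conversation(
    id="conv_123",                # the Responses API expects a "conv_" prefix
    created_at=int(time.time()),  # seconds since the Unix epoch
    metadata={"topic": "demo"},   # up to 16 key-value pairs per the field docs
)
print(conv.object)  # -> "conversation" (Literal default)
```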
llama_stack_api/datasetio.py DELETED
@@ -1,55 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from typing import Any, Protocol, runtime_checkable
-
-from llama_stack_api.common.responses import PaginatedResponse
-from llama_stack_api.datasets import Dataset
-from llama_stack_api.schema_utils import webmethod
-from llama_stack_api.version import LLAMA_STACK_API_V1BETA
-
-
-class DatasetStore(Protocol):
-    def get_dataset(self, dataset_id: str) -> Dataset: ...
-
-
-@runtime_checkable
-class DatasetIO(Protocol):
-    # keeping for aligning with inference/safety, but this is not used
-    dataset_store: DatasetStore
-
-    @webmethod(route="/datasetio/iterrows/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1BETA)
-    async def iterrows(
-        self,
-        dataset_id: str,
-        start_index: int | None = None,
-        limit: int | None = None,
-    ) -> PaginatedResponse:
-        """Get a paginated list of rows from a dataset.
-
-        Uses offset-based pagination where:
-        - start_index: The starting index (0-based). If None, starts from beginning.
-        - limit: Number of items to return. If None or -1, returns all items.
-
-        The response includes:
-        - data: List of items for the current page.
-        - has_more: Whether there are more items available after this set.
-
-        :param dataset_id: The ID of the dataset to get the rows from.
-        :param start_index: Index into dataset for the first row to get. Get all rows if None.
-        :param limit: The number of rows to get.
-        :returns: A PaginatedResponse.
-        """
-        ...
-
-    @webmethod(route="/datasetio/append-rows/{dataset_id:path}", method="POST", level=LLAMA_STACK_API_V1BETA)
-    async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None:
-        """Append rows to a dataset.
-
-        :param dataset_id: The ID of the dataset to append the rows to.
-        :param rows: The rows to append to the dataset.
-        """
-        ...
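`iterrows` uses offset-based pagination, which composes into a simple drain loop. A hedged sketch; `impl` stands in for any object implementing the `DatasetIO` protocol and is not part of this package:

```python
# Sketch: drain a dataset page by page using the offset-based contract above.
# `impl` is a hypothetical DatasetIO implementation; PaginatedResponse exposes
# `data` and `has_more` per the docstring in the deleted module.
async def read_all_rows(impl, dataset_id: str, page_size: int = 100) -> list[dict]:
    rows: list[dict] = []
    start = 0
    while True:
        page = await impl.iterrows(dataset_id, start_index=start, limit=page_size)
        rows.extend(page.data)
        if not page.has_more:
            return rows
        start += len(page.data)  # advance the offset by the rows actually returned
```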