llama-stack-api 0.4.3__py3-none-any.whl → 0.5.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. llama_stack_api/__init__.py +1100 -0
  2. llama_stack_api/admin/__init__.py +45 -0
  3. llama_stack_api/admin/api.py +72 -0
  4. llama_stack_api/admin/fastapi_routes.py +117 -0
  5. llama_stack_api/admin/models.py +113 -0
  6. llama_stack_api/agents/__init__.py +38 -0
  7. llama_stack_api/agents/api.py +52 -0
  8. llama_stack_api/agents/fastapi_routes.py +268 -0
  9. llama_stack_api/agents/models.py +181 -0
  10. llama_stack_api/batches/__init__.py +40 -0
  11. llama_stack_api/batches/api.py +53 -0
  12. llama_stack_api/batches/fastapi_routes.py +113 -0
  13. llama_stack_api/batches/models.py +78 -0
  14. llama_stack_api/benchmarks/__init__.py +43 -0
  15. llama_stack_api/benchmarks/api.py +39 -0
  16. llama_stack_api/benchmarks/fastapi_routes.py +109 -0
  17. llama_stack_api/benchmarks/models.py +109 -0
  18. llama_stack_api/common/__init__.py +5 -0
  19. llama_stack_api/common/content_types.py +101 -0
  20. llama_stack_api/common/errors.py +110 -0
  21. llama_stack_api/common/job_types.py +38 -0
  22. llama_stack_api/common/responses.py +77 -0
  23. llama_stack_api/common/training_types.py +47 -0
  24. llama_stack_api/common/type_system.py +146 -0
  25. llama_stack_api/connectors/__init__.py +38 -0
  26. llama_stack_api/connectors/api.py +50 -0
  27. llama_stack_api/connectors/fastapi_routes.py +103 -0
  28. llama_stack_api/connectors/models.py +103 -0
  29. llama_stack_api/conversations/__init__.py +61 -0
  30. llama_stack_api/conversations/api.py +44 -0
  31. llama_stack_api/conversations/fastapi_routes.py +177 -0
  32. llama_stack_api/conversations/models.py +245 -0
  33. llama_stack_api/datasetio/__init__.py +34 -0
  34. llama_stack_api/datasetio/api.py +42 -0
  35. llama_stack_api/datasetio/fastapi_routes.py +94 -0
  36. llama_stack_api/datasetio/models.py +48 -0
  37. llama_stack_api/datasets/__init__.py +61 -0
  38. llama_stack_api/datasets/api.py +35 -0
  39. llama_stack_api/datasets/fastapi_routes.py +104 -0
  40. llama_stack_api/datasets/models.py +152 -0
  41. llama_stack_api/datatypes.py +373 -0
  42. llama_stack_api/eval/__init__.py +55 -0
  43. llama_stack_api/eval/api.py +51 -0
  44. llama_stack_api/eval/compat.py +300 -0
  45. llama_stack_api/eval/fastapi_routes.py +126 -0
  46. llama_stack_api/eval/models.py +141 -0
  47. llama_stack_api/file_processors/__init__.py +27 -0
  48. llama_stack_api/file_processors/api.py +64 -0
  49. llama_stack_api/file_processors/fastapi_routes.py +78 -0
  50. llama_stack_api/file_processors/models.py +42 -0
  51. llama_stack_api/files/__init__.py +35 -0
  52. llama_stack_api/files/api.py +51 -0
  53. llama_stack_api/files/fastapi_routes.py +124 -0
  54. llama_stack_api/files/models.py +107 -0
  55. llama_stack_api/inference/__init__.py +207 -0
  56. llama_stack_api/inference/api.py +93 -0
  57. llama_stack_api/inference/fastapi_routes.py +243 -0
  58. llama_stack_api/inference/models.py +1035 -0
  59. llama_stack_api/inspect_api/__init__.py +37 -0
  60. llama_stack_api/inspect_api/api.py +25 -0
  61. llama_stack_api/inspect_api/fastapi_routes.py +76 -0
  62. llama_stack_api/inspect_api/models.py +28 -0
  63. llama_stack_api/internal/__init__.py +9 -0
  64. llama_stack_api/internal/kvstore.py +28 -0
  65. llama_stack_api/internal/sqlstore.py +81 -0
  66. llama_stack_api/models/__init__.py +47 -0
  67. llama_stack_api/models/api.py +38 -0
  68. llama_stack_api/models/fastapi_routes.py +104 -0
  69. llama_stack_api/models/models.py +157 -0
  70. llama_stack_api/openai_responses.py +1494 -0
  71. llama_stack_api/post_training/__init__.py +73 -0
  72. llama_stack_api/post_training/api.py +36 -0
  73. llama_stack_api/post_training/fastapi_routes.py +116 -0
  74. llama_stack_api/post_training/models.py +339 -0
  75. llama_stack_api/prompts/__init__.py +47 -0
  76. llama_stack_api/prompts/api.py +44 -0
  77. llama_stack_api/prompts/fastapi_routes.py +163 -0
  78. llama_stack_api/prompts/models.py +177 -0
  79. llama_stack_api/providers/__init__.py +33 -0
  80. llama_stack_api/providers/api.py +16 -0
  81. llama_stack_api/providers/fastapi_routes.py +57 -0
  82. llama_stack_api/providers/models.py +24 -0
  83. llama_stack_api/rag_tool.py +168 -0
  84. llama_stack_api/resource.py +36 -0
  85. llama_stack_api/router_utils.py +160 -0
  86. llama_stack_api/safety/__init__.py +37 -0
  87. llama_stack_api/safety/api.py +29 -0
  88. llama_stack_api/safety/datatypes.py +83 -0
  89. llama_stack_api/safety/fastapi_routes.py +55 -0
  90. llama_stack_api/safety/models.py +38 -0
  91. llama_stack_api/schema_utils.py +251 -0
  92. llama_stack_api/scoring/__init__.py +66 -0
  93. llama_stack_api/scoring/api.py +35 -0
  94. llama_stack_api/scoring/fastapi_routes.py +67 -0
  95. llama_stack_api/scoring/models.py +81 -0
  96. llama_stack_api/scoring_functions/__init__.py +50 -0
  97. llama_stack_api/scoring_functions/api.py +39 -0
  98. llama_stack_api/scoring_functions/fastapi_routes.py +108 -0
  99. llama_stack_api/scoring_functions/models.py +214 -0
  100. llama_stack_api/shields/__init__.py +41 -0
  101. llama_stack_api/shields/api.py +39 -0
  102. llama_stack_api/shields/fastapi_routes.py +104 -0
  103. llama_stack_api/shields/models.py +74 -0
  104. llama_stack_api/tools.py +226 -0
  105. llama_stack_api/validators.py +46 -0
  106. llama_stack_api/vector_io/__init__.py +88 -0
  107. llama_stack_api/vector_io/api.py +234 -0
  108. llama_stack_api/vector_io/fastapi_routes.py +447 -0
  109. llama_stack_api/vector_io/models.py +663 -0
  110. llama_stack_api/vector_stores.py +53 -0
  111. llama_stack_api/version.py +9 -0
  112. {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.5.0rc1.dist-info}/METADATA +1 -1
  113. llama_stack_api-0.5.0rc1.dist-info/RECORD +115 -0
  114. llama_stack_api-0.5.0rc1.dist-info/top_level.txt +1 -0
  115. llama_stack_api-0.4.3.dist-info/RECORD +0 -4
  116. llama_stack_api-0.4.3.dist-info/top_level.txt +0 -1
  117. {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.5.0rc1.dist-info}/WHEEL +0 -0
llama_stack_api/openai_responses.py
@@ -0,0 +1,1494 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+ #
+ # This source code is licensed under the terms described in the LICENSE file in
+ # the root directory of this source tree.
+
+ from collections.abc import Sequence
+ from enum import Enum
+ from typing import Annotated, Any, Literal
+
+ from pydantic import BaseModel, Field, model_validator
+ from typing_extensions import TypedDict
+
+ from llama_stack_api.inference import OpenAITokenLogProb
+ from llama_stack_api.schema_utils import json_schema_type, register_schema
+ from llama_stack_api.vector_io import SearchRankingOptions as FileSearchRankingOptions
+
+ # NOTE(ashwin): this file is literally a copy of the OpenAI responses API schema. We should probably
+ # take their YAML and generate this file automatically. Their YAML is available.
+
+
+ @json_schema_type
+ class OpenAIResponseError(BaseModel):
+     """Error details for failed OpenAI response requests.
+
+     :param code: Error code identifying the type of failure
+     :param message: Human-readable error message describing the failure
+     """
+
+     code: str
+     message: str
+
+
+ @json_schema_type
+ class OpenAIResponseInputMessageContentText(BaseModel):
+     """Text content for input messages in OpenAI response format.
+
+     :param text: The text content of the input message
+     :param type: Content type identifier, always "input_text"
+     """
+
+     text: str
+     type: Literal["input_text"] = "input_text"
+
+
+ @json_schema_type
+ class OpenAIResponseInputMessageContentImage(BaseModel):
+     """Image content for input messages in OpenAI response format.
+
+     :param detail: Level of detail for image processing, can be "low", "high", or "auto"
+     :param type: Content type identifier, always "input_image"
+     :param file_id: (Optional) The ID of the file to be sent to the model.
+     :param image_url: (Optional) URL of the image content
+     """
+
+     detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto"
+     type: Literal["input_image"] = "input_image"
+     file_id: str | None = None
+     image_url: str | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseInputMessageContentFile(BaseModel):
+     """File content for input messages in OpenAI response format.
+
+     :param type: The type of the input item. Always `input_file`.
+     :param file_data: The data of the file to be sent to the model.
+     :param file_id: (Optional) The ID of the file to be sent to the model.
+     :param file_url: The URL of the file to be sent to the model.
+     :param filename: The name of the file to be sent to the model.
+     """
+
+     type: Literal["input_file"] = "input_file"
+     file_data: str | None = None
+     file_id: str | None = None
+     file_url: str | None = None
+     filename: str | None = None
+
+     @model_validator(mode="after")
+     def validate_file_source(self) -> "OpenAIResponseInputMessageContentFile":
+         if not any([self.file_data, self.file_id, self.file_url, self.filename]):
+             raise ValueError(
+                 "At least one of 'file_data', 'file_id', 'file_url', or 'filename' must be provided for file content"
+             )
+         return self
+
+
+ OpenAIResponseInputMessageContent = Annotated[
+     OpenAIResponseInputMessageContentText
+     | OpenAIResponseInputMessageContentImage
+     | OpenAIResponseInputMessageContentFile,
+     Field(discriminator="type"),
+ ]
+ register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMessageContent")
+
+
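Aside: the input-content union above dispatches on the literal `type` field. A minimal usage sketch (hypothetical, assuming the new module is importable as `llama_stack_api.openai_responses`):

from pydantic import TypeAdapter, ValidationError

from llama_stack_api.openai_responses import OpenAIResponseInputMessageContent

adapter = TypeAdapter(OpenAIResponseInputMessageContent)

# Pydantic picks the variant by the "type" discriminator.
text = adapter.validate_python({"type": "input_text", "text": "hello"})
image = adapter.validate_python({"type": "input_image", "image_url": "https://example.com/cat.png"})
print(type(text).__name__)   # OpenAIResponseInputMessageContentText
print(type(image).__name__)  # OpenAIResponseInputMessageContentImage

# The file variant additionally runs its model_validator: at least one
# of file_data/file_id/file_url/filename must be set.
try:
    adapter.validate_python({"type": "input_file"})
except ValidationError as exc:
    print(exc.errors()[0]["msg"])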
+ @json_schema_type
+ class OpenAIResponsePrompt(BaseModel):
+     """OpenAI compatible Prompt object that is used in OpenAI responses.
+
+     :param id: Unique identifier of the prompt template
+     :param variables: Dictionary of variable names to OpenAIResponseInputMessageContent structure for template substitution. The substitution values can either be strings, or other Response input types
+         like images or files.
+     :param version: Version number of the prompt to use (defaults to latest if not specified)
+     """
+
+     id: str
+     variables: dict[str, OpenAIResponseInputMessageContent] | None = None
+     version: str | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseAnnotationFileCitation(BaseModel):
+     """File citation annotation for referencing specific files in response content.
+
+     :param type: Annotation type identifier, always "file_citation"
+     :param file_id: Unique identifier of the referenced file
+     :param filename: Name of the referenced file
+     :param index: Position index of the citation within the content
+     """
+
+     type: Literal["file_citation"] = "file_citation"
+     file_id: str
+     filename: str
+     index: int
+
+
+ @json_schema_type
+ class OpenAIResponseAnnotationCitation(BaseModel):
+     """URL citation annotation for referencing external web resources.
+
+     :param type: Annotation type identifier, always "url_citation"
+     :param end_index: End position of the citation span in the content
+     :param start_index: Start position of the citation span in the content
+     :param title: Title of the referenced web resource
+     :param url: URL of the referenced web resource
+     """
+
+     type: Literal["url_citation"] = "url_citation"
+     end_index: int
+     start_index: int
+     title: str
+     url: str
+
+
+ @json_schema_type
+ class OpenAIResponseAnnotationContainerFileCitation(BaseModel):
+     type: Literal["container_file_citation"] = "container_file_citation"
+     container_id: str
+     end_index: int
+     file_id: str
+     filename: str
+     start_index: int
+
+
+ @json_schema_type
+ class OpenAIResponseAnnotationFilePath(BaseModel):
+     type: Literal["file_path"] = "file_path"
+     file_id: str
+     index: int
+
+
+ OpenAIResponseAnnotations = Annotated[
+     OpenAIResponseAnnotationFileCitation
+     | OpenAIResponseAnnotationCitation
+     | OpenAIResponseAnnotationContainerFileCitation
+     | OpenAIResponseAnnotationFilePath,
+     Field(discriminator="type"),
+ ]
+ register_schema(OpenAIResponseAnnotations, name="OpenAIResponseAnnotations")
+
+
+ @json_schema_type
+ class OpenAIResponseOutputMessageContentOutputText(BaseModel):
+     text: str
+     type: Literal["output_text"] = "output_text"
+     annotations: list[OpenAIResponseAnnotations] = Field(default_factory=list)
+     logprobs: list[OpenAITokenLogProb] | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseContentPartRefusal(BaseModel):
+     """Refusal content within a streamed response part.
+
+     :param type: Content part type identifier, always "refusal"
+     :param refusal: Refusal text supplied by the model
+     """
+
+     type: Literal["refusal"] = "refusal"
+     refusal: str
+
+
+ OpenAIResponseOutputMessageContent = Annotated[
+     OpenAIResponseOutputMessageContentOutputText | OpenAIResponseContentPartRefusal,
+     Field(discriminator="type"),
+ ]
+ register_schema(OpenAIResponseOutputMessageContent, name="OpenAIResponseOutputMessageContent")
+
+
+ @json_schema_type
+ class OpenAIResponseMessage(BaseModel):
+     """
+     Corresponds to the various Message types in the Responses API.
+     They are all under one type because the Responses API gives them all
+     the same "type" value, and there is no way to tell them apart in certain
+     scenarios.
+     """
+
+     content: str | Sequence[OpenAIResponseInputMessageContent] | Sequence[OpenAIResponseOutputMessageContent]
+     role: Literal["system"] | Literal["developer"] | Literal["user"] | Literal["assistant"]
+     type: Literal["message"] = "message"
+
+     # The fields below are not used in all scenarios, but are required in others.
+     id: str | None = None
+     status: str | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
+     """Web search tool call output message for OpenAI responses.
+
+     :param id: Unique identifier for this tool call
+     :param status: Current status of the web search operation
+     :param type: Tool call type identifier, always "web_search_call"
+     """
+
+     id: str
+     status: str
+     type: Literal["web_search_call"] = "web_search_call"
+
+
+ class OpenAIResponseOutputMessageFileSearchToolCallResults(BaseModel):
+     """Search results returned by the file search operation.
+
+     :param attributes: (Optional) Key-value attributes associated with the file
+     :param file_id: Unique identifier of the file containing the result
+     :param filename: Name of the file containing the result
+     :param score: Relevance score for this search result (between 0 and 1)
+     :param text: Text content of the search result
+     """
+
+     attributes: dict[str, Any]
+     file_id: str
+     filename: str
+     score: float
+     text: str
+
+
+ @json_schema_type
+ class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
+     """File search tool call output message for OpenAI responses.
+
+     :param id: Unique identifier for this tool call
+     :param queries: List of search queries executed
+     :param status: Current status of the file search operation
+     :param type: Tool call type identifier, always "file_search_call"
+     :param results: (Optional) Search results returned by the file search operation
+     """
+
+     id: str
+     queries: Sequence[str]
+     status: str
+     type: Literal["file_search_call"] = "file_search_call"
+     results: Sequence[OpenAIResponseOutputMessageFileSearchToolCallResults] | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+     """Function tool call output message for OpenAI responses.
+
+     :param call_id: Unique identifier for the function call
+     :param name: Name of the function being called
+     :param arguments: JSON string containing the function arguments
+     :param type: Tool call type identifier, always "function_call"
+     :param id: (Optional) Additional identifier for the tool call
+     :param status: (Optional) Current status of the function call execution
+     """
+
+     call_id: str
+     name: str
+     arguments: str
+     type: Literal["function_call"] = "function_call"
+     id: str | None = None
+     status: str | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseOutputMessageMCPCall(BaseModel):
+     """Model Context Protocol (MCP) call output message for OpenAI responses.
+
+     :param id: Unique identifier for this MCP call
+     :param type: Tool call type identifier, always "mcp_call"
+     :param arguments: JSON string containing the MCP call arguments
+     :param name: Name of the MCP method being called
+     :param server_label: Label identifying the MCP server handling the call
+     :param error: (Optional) Error message if the MCP call failed
+     :param output: (Optional) Output result from the successful MCP call
+     """
+
+     id: str
+     type: Literal["mcp_call"] = "mcp_call"
+     arguments: str
+     name: str
+     server_label: str
+     error: str | None = None
+     output: str | None = None
+
+
+ class MCPListToolsTool(BaseModel):
+     """Tool definition returned by MCP list tools operation.
+
+     :param input_schema: JSON schema defining the tool's input parameters
+     :param name: Name of the tool
+     :param description: (Optional) Description of what the tool does
+     """
+
+     input_schema: dict[str, Any]
+     name: str
+     description: str | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseOutputMessageMCPListTools(BaseModel):
+     """MCP list tools output message containing available tools from an MCP server.
+
+     :param id: Unique identifier for this MCP list tools operation
+     :param type: Tool call type identifier, always "mcp_list_tools"
+     :param server_label: Label identifying the MCP server providing the tools
+     :param tools: List of available tools provided by the MCP server
+     """
+
+     id: str
+     type: Literal["mcp_list_tools"] = "mcp_list_tools"
+     server_label: str
+     tools: list[MCPListToolsTool]
+
+
+ @json_schema_type
+ class OpenAIResponseMCPApprovalRequest(BaseModel):
+     """
+     A request for human approval of a tool invocation.
+     """
+
+     arguments: str
+     id: str
+     name: str
+     server_label: str
+     type: Literal["mcp_approval_request"] = "mcp_approval_request"
+
+
+ @json_schema_type
+ class OpenAIResponseMCPApprovalResponse(BaseModel):
+     """
+     A response to an MCP approval request.
+     """
+
+     approval_request_id: str
+     approve: bool
+     type: Literal["mcp_approval_response"] = "mcp_approval_response"
+     id: str | None = None
+     reason: str | None = None
+
+
+ OpenAIResponseOutput = Annotated[
+     OpenAIResponseMessage
+     | OpenAIResponseOutputMessageWebSearchToolCall
+     | OpenAIResponseOutputMessageFileSearchToolCall
+     | OpenAIResponseOutputMessageFunctionToolCall
+     | OpenAIResponseOutputMessageMCPCall
+     | OpenAIResponseOutputMessageMCPListTools
+     | OpenAIResponseMCPApprovalRequest,
+     Field(discriminator="type"),
+ ]
+ register_schema(OpenAIResponseOutput, name="OpenAIResponseOutput")
+
+
+ # This has to be a TypedDict because we need a "schema" field and our strong
+ # typing code in the schema generator doesn't support Pydantic aliases. That also
+ # means we can't use a discriminator field here, because TypedDicts don't support
+ # default values which the strong typing code requires for discriminators.
+ class OpenAIResponseTextFormat(TypedDict, total=False):
+     """Configuration for Responses API text format.
+
+     :param type: Must be "text", "json_schema", or "json_object" to identify the format type
+     :param name: The name of the response format. Only used for json_schema.
+     :param schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model. Only used for json_schema.
+     :param description: (Optional) A description of the response format. Only used for json_schema.
+     :param strict: (Optional) Whether to strictly enforce the JSON schema. If true, the response must match the schema exactly. Only used for json_schema.
+     """
+
+     type: Literal["text"] | Literal["json_schema"] | Literal["json_object"]
+     name: str | None
+     schema: dict[str, Any] | None
+     description: str | None
+     strict: bool | None
+
+
+ @json_schema_type
+ class OpenAIResponseText(BaseModel):
+     """Text response configuration for OpenAI responses.
+
+     :param format: (Optional) Text format configuration specifying output format requirements
+     """
+
+     format: OpenAIResponseTextFormat | None = None
+
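Aside: since OpenAIResponseTextFormat is a total=False TypedDict, only the keys you actually need have to be present. A hypothetical sketch of requesting structured JSON output (the schema and names are invented for illustration):

from llama_stack_api.openai_responses import OpenAIResponseText

# Hypothetical schema; any JSON Schema dict works here.
text = OpenAIResponseText(
    format={
        "type": "json_schema",
        "name": "weather_report",
        "schema": {
            "type": "object",
            "properties": {"city": {"type": "string"}, "temp_c": {"type": "number"}},
            "required": ["city", "temp_c"],
        },
        "strict": True,
    }
)
print(text.format["type"])  # json_schema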
+
+ @json_schema_type
+ class OpenAIResponseReasoning(BaseModel):
+     """Configuration for reasoning effort in OpenAI responses.
+
+     Controls how much reasoning the model performs before generating a response.
+
+     :param effort: The effort level for reasoning. "low" favors speed and economical token usage,
+         "high" favors more complete reasoning, "medium" is a balance between the two.
+     """
+
+     effort: Literal["none", "minimal", "low", "medium", "high", "xhigh"] | None = None
+
+
+ # Must match type Literals of OpenAIResponseInputToolWebSearch below
+ WebSearchToolTypes = ["web_search", "web_search_preview", "web_search_preview_2025_03_11", "web_search_2025_08_26"]
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolWebSearch(BaseModel):
+     """Web search tool configuration for OpenAI response inputs.
+
+     :param type: Web search tool type variant to use
+     :param search_context_size: (Optional) Size of search context, must be "low", "medium", or "high"
+     """
+
+     # Must match values of WebSearchToolTypes above
+     type: (
+         Literal["web_search"]
+         | Literal["web_search_preview"]
+         | Literal["web_search_preview_2025_03_11"]
+         | Literal["web_search_2025_08_26"]
+     ) = "web_search"
+     # TODO: actually use search_context_size somewhere...
+     search_context_size: str | None = Field(default="medium", pattern="^low|medium|high$")
+     # TODO: add user_location
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolFunction(BaseModel):
+     """Function tool configuration for OpenAI response inputs.
+
+     :param type: Tool type identifier, always "function"
+     :param name: Name of the function that can be called
+     :param description: (Optional) Description of what the function does
+     :param parameters: (Optional) JSON schema defining the function's parameters
+     :param strict: (Optional) Whether to enforce strict parameter validation
+     """
+
+     type: Literal["function"] = "function"
+     name: str
+     description: str | None = None
+     parameters: dict[str, Any] | None
+     strict: bool | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolFileSearch(BaseModel):
+     """File search tool configuration for OpenAI response inputs.
+
+     :param type: Tool type identifier, always "file_search"
+     :param vector_store_ids: List of vector store identifiers to search within
+     :param filters: (Optional) Additional filters to apply to the search
+     :param max_num_results: (Optional) Maximum number of search results to return (1-50)
+     :param ranking_options: (Optional) Options for ranking and scoring search results
+     """
+
+     type: Literal["file_search"] = "file_search"
+     vector_store_ids: list[str]
+     filters: dict[str, Any] | None = None
+     max_num_results: int | None = Field(default=10, ge=1, le=50)
+     ranking_options: FileSearchRankingOptions | None = None
+
+
+ class ApprovalFilter(BaseModel):
+     """Filter configuration for MCP tool approval requirements.
+
+     :param always: (Optional) List of tool names that always require approval
+     :param never: (Optional) List of tool names that never require approval
+     """
+
+     always: list[str] | None = None
+     never: list[str] | None = None
+
+
+ class AllowedToolsFilter(BaseModel):
+     """Filter configuration for restricting which MCP tools can be used.
+
+     :param tool_names: (Optional) List of specific tool names that are allowed
+     """
+
+     tool_names: list[str] | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolMCP(BaseModel):
+     """Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
+
+     :param type: Tool type identifier, always "mcp"
+     :param server_label: Label to identify this MCP server
+     :param connector_id: (Optional) ID of the connector to use for this MCP server
+     :param server_url: (Optional) URL endpoint of the MCP server
+     :param headers: (Optional) HTTP headers to include when connecting to the server
+     :param authorization: (Optional) OAuth access token for authenticating with the MCP server
+     :param require_approval: Approval requirement for tool calls ("always", "never", or filter)
+     :param allowed_tools: (Optional) Restriction on which tools can be used from this server
+     """
+
+     type: Literal["mcp"] = "mcp"
+     server_label: str
+     connector_id: str | None = None
+     server_url: str | None = None
+     headers: dict[str, Any] | None = None
+     authorization: str | None = Field(default=None, exclude=True)
+
+     require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
+     allowed_tools: list[str] | AllowedToolsFilter | None = None
+
+     @model_validator(mode="after")
+     def validate_server_or_connector(self) -> "OpenAIResponseInputToolMCP":
+         if not self.server_url and not self.connector_id:
+             raise ValueError("Either 'server_url' or 'connector_id' must be provided for MCP tool")
+         return self
+
+
+ OpenAIResponseInputTool = Annotated[
+     OpenAIResponseInputToolWebSearch
+     | OpenAIResponseInputToolFileSearch
+     | OpenAIResponseInputToolFunction
+     | OpenAIResponseInputToolMCP,
+     Field(discriminator="type"),
+ ]
+ register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
+
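Aside: an MCP tool entry must name either a server_url or a connector_id, and authorization is excluded from serialized dumps. A hypothetical sketch (server label, URL, and tool names invented for illustration):

from llama_stack_api.openai_responses import OpenAIResponseInputToolMCP

tool = OpenAIResponseInputToolMCP(
    server_label="docs",                       # hypothetical server
    server_url="https://mcp.example.com/sse",  # hypothetical endpoint
    authorization="secret-token",
    require_approval="always",
    allowed_tools=["search_docs"],
)
print("authorization" in tool.model_dump())  # False -- excluded from dumps

# Omitting both server_url and connector_id fails the model_validator.
OpenAIResponseInputToolMCP(server_label="docs")  # raises pydantic.ValidationError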
+
+ @json_schema_type
+ class OpenAIResponseToolMCP(BaseModel):
+     """Model Context Protocol (MCP) tool configuration for OpenAI response object.
+
+     :param type: Tool type identifier, always "mcp"
+     :param server_label: Label to identify this MCP server
+     :param allowed_tools: (Optional) Restriction on which tools can be used from this server
+     """
+
+     type: Literal["mcp"] = "mcp"
+     server_label: str
+     allowed_tools: list[str] | AllowedToolsFilter | None = None
+
+
+ OpenAIResponseTool = Annotated[
+     OpenAIResponseInputToolWebSearch
+     | OpenAIResponseInputToolFileSearch
+     | OpenAIResponseInputToolFunction
+     | OpenAIResponseToolMCP,  # The only type that differs from that in the inputs is the MCP tool
+     Field(discriminator="type"),
+ ]
+ register_schema(OpenAIResponseTool, name="OpenAIResponseTool")
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolChoiceAllowedTools(BaseModel):
+     """Constrains the tools available to the model to a pre-defined set.
+
+     :param mode: Constrains the tools available to the model to a pre-defined set
+     :param tools: A list of tool definitions that the model should be allowed to call
+     :param type: Tool choice type identifier, always "allowed_tools"
+     """
+
+     mode: Literal["auto", "required"] = "auto"
+     tools: list[dict[str, str]]
+     type: Literal["allowed_tools"] = "allowed_tools"
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolChoiceFileSearch(BaseModel):
+     """Indicates that the model should use file search to generate a response.
+
+     :param type: Tool choice type identifier, always "file_search"
+     """
+
+     type: Literal["file_search"] = "file_search"
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolChoiceWebSearch(BaseModel):
+     """Indicates that the model should use web search to generate a response.
+
+     :param type: Web search tool type variant to use
+     """
+
+     type: (
+         Literal["web_search"]
+         | Literal["web_search_preview"]
+         | Literal["web_search_preview_2025_03_11"]
+         | Literal["web_search_2025_08_26"]
+     ) = "web_search"
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolChoiceFunctionTool(BaseModel):
+     """Forces the model to call a specific function.
+
+     :param name: The name of the function to call
+     :param type: Tool choice type identifier, always "function"
+     """
+
+     name: str
+     type: Literal["function"] = "function"
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolChoiceMCPTool(BaseModel):
+     """Forces the model to call a specific tool on a remote MCP server.
+
+     :param server_label: The label of the MCP server to use.
+     :param type: Tool choice type identifier, always "mcp"
+     :param name: (Optional) The name of the tool to call on the server.
+     """
+
+     server_label: str
+     type: Literal["mcp"] = "mcp"
+     name: str | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseInputToolChoiceCustomTool(BaseModel):
+     """Forces the model to call a custom tool.
+
+     :param type: Tool choice type identifier, always "custom"
+     :param name: The name of the custom tool to call.
+     """
+
+     type: Literal["custom"] = "custom"
+     name: str
+
+
+ class OpenAIResponseInputToolChoiceMode(str, Enum):
+     auto = "auto"
+     required = "required"
+     none = "none"
+
+
+ OpenAIResponseInputToolChoiceObject = Annotated[
+     OpenAIResponseInputToolChoiceAllowedTools
+     | OpenAIResponseInputToolChoiceFileSearch
+     | OpenAIResponseInputToolChoiceWebSearch
+     | OpenAIResponseInputToolChoiceFunctionTool
+     | OpenAIResponseInputToolChoiceMCPTool
+     | OpenAIResponseInputToolChoiceCustomTool,
+     Field(discriminator="type"),
+ ]
+
+ # 3. Final Union without registration or None (Keep it clean)
+ OpenAIResponseInputToolChoice = OpenAIResponseInputToolChoiceMode | OpenAIResponseInputToolChoiceObject
+
+ register_schema(OpenAIResponseInputToolChoice, name="OpenAIResponseInputToolChoice")
+
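Aside: tool_choice accepts either a bare mode string or a typed object. A hypothetical sketch (the function name is invented for illustration):

from pydantic import TypeAdapter

from llama_stack_api.openai_responses import OpenAIResponseInputToolChoice

adapter = TypeAdapter(OpenAIResponseInputToolChoice)

mode = adapter.validate_python("required")
forced = adapter.validate_python({"type": "function", "name": "get_weather"})
print(mode)                   # OpenAIResponseInputToolChoiceMode.required
print(type(forced).__name__)  # OpenAIResponseInputToolChoiceFunctionTool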
+
+ class OpenAIResponseUsageOutputTokensDetails(BaseModel):
+     """Token details for output tokens in OpenAI response usage.
+
+     :param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)
+     """
+
+     reasoning_tokens: int
+
+
+ class OpenAIResponseUsageInputTokensDetails(BaseModel):
+     """Token details for input tokens in OpenAI response usage.
+
+     :param cached_tokens: Number of tokens retrieved from cache
+     """
+
+     cached_tokens: int
+
+
+ @json_schema_type
+ class OpenAIResponseUsage(BaseModel):
+     """Usage information for OpenAI response.
+
+     :param input_tokens: Number of tokens in the input
+     :param output_tokens: Number of tokens in the output
+     :param total_tokens: Total tokens used (input + output)
+     :param input_tokens_details: Detailed breakdown of input token usage
+     :param output_tokens_details: Detailed breakdown of output token usage
+     """
+
+     input_tokens: int
+     output_tokens: int
+     total_tokens: int
+     input_tokens_details: OpenAIResponseUsageInputTokensDetails
+     output_tokens_details: OpenAIResponseUsageOutputTokensDetails
+
+
+ @json_schema_type
+ class OpenAIResponseObject(BaseModel):
+     """Complete OpenAI response object containing generation results and metadata.
+
+     :param created_at: Unix timestamp when the response was created
+     :param completed_at: (Optional) Unix timestamp when the response generation completed
+     :param error: (Optional) Error details if the response generation failed
+     :param id: Unique identifier for this response
+     :param model: Model identifier used for generation
+     :param object: Object type identifier, always "response"
+     :param output: List of generated output items (messages, tool calls, etc.)
+     :param parallel_tool_calls: (Optional) Whether to allow more than one function tool call to be generated per turn.
+     :param previous_response_id: (Optional) ID of the previous response in a conversation
+     :param prompt: (Optional) Reference to a prompt template and its variables.
+     :param status: Current status of the response generation
+     :param temperature: (Optional) Sampling temperature used for generation
+     :param text: Text formatting configuration for the response
+     :param top_p: (Optional) Nucleus sampling parameter used for generation
+     :param tools: (Optional) An array of tools the model may call while generating a response.
+     :param tool_choice: (Optional) Tool choice configuration for the response.
+     :param truncation: (Optional) Truncation strategy applied to the response
+     :param usage: (Optional) Token usage information for the response
+     :param instructions: (Optional) System message inserted into the model's context
+     :param max_tool_calls: (Optional) Max number of total calls to built-in tools that can be processed in a response
+     :param reasoning: (Optional) Reasoning effort configuration for the response
+     :param max_output_tokens: (Optional) An upper bound for the number of tokens that can be generated for a response, including visible output tokens.
+     :param metadata: (Optional) Dictionary of metadata key-value pairs
+     :param store: Whether the response is stored for later retrieval
+     """
+
+     created_at: int
+     completed_at: int | None = None
+     error: OpenAIResponseError | None = None
+     id: str
+     model: str
+     object: Literal["response"] = "response"
+     output: Sequence[OpenAIResponseOutput]
+     parallel_tool_calls: bool | None = True
+     previous_response_id: str | None = None
+     prompt: OpenAIResponsePrompt | None = None
+     status: str
+     temperature: float | None = None
+     # Default to text format to avoid breaking the loading of old responses
+     # before the field was added. New responses will have this set always.
+     text: OpenAIResponseText = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text"))
+     top_p: float | None = None
+     tools: Sequence[OpenAIResponseTool] | None = None
+     tool_choice: OpenAIResponseInputToolChoice | None = None
+     truncation: str | None = None
+     usage: OpenAIResponseUsage | None = None
+     instructions: str | None = None
+     max_tool_calls: int | None = None
+     reasoning: OpenAIResponseReasoning | None = None
+     max_output_tokens: int | None = None
+     metadata: dict[str, str] | None = None
+     store: bool
+
+
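Aside: a hypothetical sketch of the minimal required fields on the response object (the IDs, timestamp, and model name are invented for illustration):

from llama_stack_api.openai_responses import OpenAIResponseMessage, OpenAIResponseObject

resp = OpenAIResponseObject(
    created_at=1700000000,
    id="resp_123",          # invented ID
    model="example-model",  # invented model name
    output=[OpenAIResponseMessage(role="assistant", content="Hi there!", status="completed")],
    status="completed",
    store=True,
)
print(resp.text.format["type"])  # "text" -- the backward-compatible default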
+ @json_schema_type
+ class OpenAIDeleteResponseObject(BaseModel):
+     """Response object confirming deletion of an OpenAI response.
+
+     :param id: Unique identifier of the deleted response
+     :param object: Object type identifier, always "response"
+     :param deleted: Deletion confirmation flag, always True
+     """
+
+     id: str
+     object: Literal["response"] = "response"
+     deleted: bool = True
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseCreated(BaseModel):
+     """Streaming event indicating a new response has been created.
+
+     :param response: The response object that was created
+     :param type: Event type identifier, always "response.created"
+     """
+
+     response: OpenAIResponseObject
+     type: Literal["response.created"] = "response.created"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseInProgress(BaseModel):
+     """Streaming event indicating the response remains in progress.
+
+     :param response: Current response state while in progress
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.in_progress"
+     """
+
+     response: OpenAIResponseObject
+     sequence_number: int
+     type: Literal["response.in_progress"] = "response.in_progress"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
+     """Streaming event indicating a response has been completed.
+
+     :param response: Completed response object
+     :param type: Event type identifier, always "response.completed"
+     """
+
+     response: OpenAIResponseObject
+     type: Literal["response.completed"] = "response.completed"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseIncomplete(BaseModel):
+     """Streaming event emitted when a response ends in an incomplete state.
+
+     :param response: Response object describing the incomplete state
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.incomplete"
+     """
+
+     response: OpenAIResponseObject
+     sequence_number: int
+     type: Literal["response.incomplete"] = "response.incomplete"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseFailed(BaseModel):
+     """Streaming event emitted when a response fails.
+
+     :param response: Response object describing the failure
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.failed"
+     """
+
+     response: OpenAIResponseObject
+     sequence_number: int
+     type: Literal["response.failed"] = "response.failed"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseOutputItemAdded(BaseModel):
+     """Streaming event for when a new output item is added to the response.
+
+     :param response_id: Unique identifier of the response containing this output
+     :param item: The output item that was added (message, tool call, etc.)
+     :param output_index: Index position of this item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.output_item.added"
+     """
+
+     response_id: str
+     item: OpenAIResponseOutput
+     output_index: int
+     sequence_number: int
+     type: Literal["response.output_item.added"] = "response.output_item.added"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseOutputItemDone(BaseModel):
+     """Streaming event for when an output item is completed.
+
+     :param response_id: Unique identifier of the response containing this output
+     :param item: The completed output item (message, tool call, etc.)
+     :param output_index: Index position of this item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.output_item.done"
+     """
+
+     response_id: str
+     item: OpenAIResponseOutput
+     output_index: int
+     sequence_number: int
+     type: Literal["response.output_item.done"] = "response.output_item.done"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseOutputTextDelta(BaseModel):
+     """Streaming event for incremental text content updates.
+
+     :param content_index: Index position within the text content
+     :param delta: Incremental text content being added
+     :param item_id: Unique identifier of the output item being updated
+     :param logprobs: (Optional) Token log probability details
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.output_text.delta"
+     """
+
+     content_index: int
+     delta: str
+     item_id: str
+     logprobs: list[OpenAITokenLogProb] | None = None
+     output_index: int
+     sequence_number: int
+     type: Literal["response.output_text.delta"] = "response.output_text.delta"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseOutputTextDone(BaseModel):
+     """Streaming event for when text output is completed.
+
+     :param content_index: Index position within the text content
+     :param text: Final complete text content of the output item
+     :param item_id: Unique identifier of the completed output item
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.output_text.done"
+     """
+
+     content_index: int
+     text: str  # final text of the output item
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.output_text.done"] = "response.output_text.done"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta(BaseModel):
+     """Streaming event for incremental function call argument updates.
+
+     :param delta: Incremental function call arguments being added
+     :param item_id: Unique identifier of the function call being updated
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.function_call_arguments.delta"
+     """
+
+     delta: str
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.function_call_arguments.delta"] = "response.function_call_arguments.delta"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone(BaseModel):
+     """Streaming event for when function call arguments are completed.
+
+     :param arguments: Final complete arguments JSON string for the function call
+     :param item_id: Unique identifier of the completed function call
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.function_call_arguments.done"
+     """
+
+     arguments: str  # final arguments of the function call
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.function_call_arguments.done"] = "response.function_call_arguments.done"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseWebSearchCallInProgress(BaseModel):
+     """Streaming event for web search calls in progress.
+
+     :param item_id: Unique identifier of the web search call
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.web_search_call.in_progress"
+     """
+
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.web_search_call.in_progress"] = "response.web_search_call.in_progress"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseWebSearchCallSearching(BaseModel):
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.web_search_call.searching"] = "response.web_search_call.searching"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseWebSearchCallCompleted(BaseModel):
+     """Streaming event for completed web search calls.
+
+     :param item_id: Unique identifier of the completed web search call
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.web_search_call.completed"
+     """
+
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.web_search_call.completed"] = "response.web_search_call.completed"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseMcpListToolsInProgress(BaseModel):
+     sequence_number: int
+     type: Literal["response.mcp_list_tools.in_progress"] = "response.mcp_list_tools.in_progress"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseMcpListToolsFailed(BaseModel):
+     sequence_number: int
+     type: Literal["response.mcp_list_tools.failed"] = "response.mcp_list_tools.failed"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseMcpListToolsCompleted(BaseModel):
+     sequence_number: int
+     type: Literal["response.mcp_list_tools.completed"] = "response.mcp_list_tools.completed"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta(BaseModel):
+     delta: str
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.mcp_call.arguments.delta"] = "response.mcp_call.arguments.delta"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseMcpCallArgumentsDone(BaseModel):
+     arguments: str  # final arguments of the MCP call
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.mcp_call.arguments.done"] = "response.mcp_call.arguments.done"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseMcpCallInProgress(BaseModel):
+     """Streaming event for MCP calls in progress.
+
+     :param item_id: Unique identifier of the MCP call
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.mcp_call.in_progress"
+     """
+
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.mcp_call.in_progress"] = "response.mcp_call.in_progress"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseMcpCallFailed(BaseModel):
+     """Streaming event for failed MCP calls.
+
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.mcp_call.failed"
+     """
+
+     sequence_number: int
+     type: Literal["response.mcp_call.failed"] = "response.mcp_call.failed"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseMcpCallCompleted(BaseModel):
+     """Streaming event for completed MCP calls.
+
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.mcp_call.completed"
+     """
+
+     sequence_number: int
+     type: Literal["response.mcp_call.completed"] = "response.mcp_call.completed"
+
+
+ @json_schema_type
+ class OpenAIResponseContentPartOutputText(BaseModel):
+     """Text content within a streamed response part.
+
+     :param type: Content part type identifier, always "output_text"
+     :param text: Text emitted for this content part
+     :param annotations: Structured annotations associated with the text
+     :param logprobs: (Optional) Token log probability details
+     """
+
+     type: Literal["output_text"] = "output_text"
+     text: str
+     annotations: list[OpenAIResponseAnnotations] = Field(default_factory=list)
+     logprobs: list[OpenAITokenLogProb] | None = None
+
+
+ @json_schema_type
+ class OpenAIResponseContentPartReasoningText(BaseModel):
+     """Reasoning text emitted as part of a streamed response.
+
+     :param type: Content part type identifier, always "reasoning_text"
+     :param text: Reasoning text supplied by the model
+     """
+
+     type: Literal["reasoning_text"] = "reasoning_text"
+     text: str
+
+
+ OpenAIResponseContentPart = Annotated[
+     OpenAIResponseContentPartOutputText | OpenAIResponseContentPartRefusal | OpenAIResponseContentPartReasoningText,
+     Field(discriminator="type"),
+ ]
+ register_schema(OpenAIResponseContentPart, name="OpenAIResponseContentPart")
+
+
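Aside: every streamed event carries a literal `type` tag, so a consumer can dispatch without isinstance checks. A hypothetical sketch (the consumer function is invented; `event` is assumed to be one of the stream event models defined in this file):

def handle_event(event) -> None:
    # Dispatch on the literal "type" tag each event model carries.
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
    elif event.type == "response.mcp_call.failed":
        print("\n[mcp call failed]")
    elif event.type == "response.completed":
        print(f"\n[done] id={event.response.id}")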
1101
+ @json_schema_type
1102
+ class OpenAIResponseObjectStreamResponseContentPartAdded(BaseModel):
1103
+ """Streaming event for when a new content part is added to a response item.
1104
+
1105
+ :param content_index: Index position of the part within the content array
1106
+ :param response_id: Unique identifier of the response containing this content
1107
+ :param item_id: Unique identifier of the output item containing this content part
1108
+ :param output_index: Index position of the output item in the response
1109
+ :param part: The content part that was added
1110
+ :param sequence_number: Sequential number for ordering streaming events
1111
+ :param type: Event type identifier, always "response.content_part.added"
1112
+ """
1113
+
1114
+ content_index: int
1115
+ response_id: str
1116
+ item_id: str
1117
+ output_index: int
1118
+ part: OpenAIResponseContentPart
1119
+ sequence_number: int
1120
+ type: Literal["response.content_part.added"] = "response.content_part.added"
1121
+
1122
+
1123
+ @json_schema_type
1124
+ class OpenAIResponseObjectStreamResponseContentPartDone(BaseModel):
1125
+ """Streaming event for when a content part is completed.
1126
+
1127
+ :param content_index: Index position of the part within the content array
1128
+ :param response_id: Unique identifier of the response containing this content
1129
+ :param item_id: Unique identifier of the output item containing this content part
1130
+ :param output_index: Index position of the output item in the response
1131
+ :param part: The completed content part
1132
+ :param sequence_number: Sequential number for ordering streaming events
1133
+ :param type: Event type identifier, always "response.content_part.done"
1134
+ """
1135
+
1136
+ content_index: int
1137
+ response_id: str
1138
+ item_id: str
1139
+ output_index: int
1140
+ part: OpenAIResponseContentPart
1141
+ sequence_number: int
1142
+ type: Literal["response.content_part.done"] = "response.content_part.done"
1143
+
1144
+
1145
+ @json_schema_type
1146
+ class OpenAIResponseObjectStreamResponseReasoningTextDelta(BaseModel):
1147
+ """Streaming event for incremental reasoning text updates.
1148
+
1149
+ :param content_index: Index position of the reasoning content part
1150
+ :param delta: Incremental reasoning text being added
1151
+ :param item_id: Unique identifier of the output item being updated
1152
+ :param output_index: Index position of the item in the output list
1153
+ :param sequence_number: Sequential number for ordering streaming events
1154
+ :param type: Event type identifier, always "response.reasoning_text.delta"
1155
+ """
1156
+
1157
+ content_index: int
1158
+ delta: str
1159
+ item_id: str
1160
+ output_index: int
1161
+ sequence_number: int
1162
+ type: Literal["response.reasoning_text.delta"] = "response.reasoning_text.delta"
1163
+
1164
+
1165
+ @json_schema_type
1166
+ class OpenAIResponseObjectStreamResponseReasoningTextDone(BaseModel):
1167
+ """Streaming event for when reasoning text is completed.
1168
+
1169
+ :param content_index: Index position of the reasoning content part
1170
+ :param text: Final complete reasoning text
1171
+ :param item_id: Unique identifier of the completed output item
1172
+ :param output_index: Index position of the item in the output list
1173
+ :param sequence_number: Sequential number for ordering streaming events
1174
+ :param type: Event type identifier, always "response.reasoning_text.done"
1175
+ """
1176
+
1177
+ content_index: int
1178
+ text: str
1179
+ item_id: str
1180
+ output_index: int
1181
+ sequence_number: int
1182
+ type: Literal["response.reasoning_text.done"] = "response.reasoning_text.done"
1183
+
1184
+
1185
+ @json_schema_type
1186
+ class OpenAIResponseContentPartReasoningSummary(BaseModel):
1187
+ """Reasoning summary part in a streamed response.
1188
+
1189
+ :param type: Content part type identifier, always "summary_text"
1190
+ :param text: Summary text
1191
+ """
1192
+
1193
+ type: Literal["summary_text"] = "summary_text"
1194
+ text: str
1195
+
1196
+
1197
+ @json_schema_type
1198
+ class OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded(BaseModel):
1199
+ """Streaming event for when a new reasoning summary part is added.
1200
+
1201
+ :param item_id: Unique identifier of the output item
1202
+ :param output_index: Index position of the output item
1203
+ :param part: The summary part that was added
1204
+ :param sequence_number: Sequential number for ordering streaming events
1205
+ :param summary_index: Index of the summary part within the reasoning summary
1206
+ :param type: Event type identifier, always "response.reasoning_summary_part.added"
1207
+ """
1208
+
1209
+ item_id: str
1210
+ output_index: int
1211
+ part: OpenAIResponseContentPartReasoningSummary
1212
+ sequence_number: int
1213
+ summary_index: int
1214
+ type: Literal["response.reasoning_summary_part.added"] = "response.reasoning_summary_part.added"
1215
+
1216
+
1217
+ @json_schema_type
1218
+ class OpenAIResponseObjectStreamResponseReasoningSummaryPartDone(BaseModel):
1219
+ """Streaming event for when a reasoning summary part is completed.
1220
+
1221
+ :param item_id: Unique identifier of the output item
1222
+ :param output_index: Index position of the output item
1223
+ :param part: The completed summary part
1224
+ :param sequence_number: Sequential number for ordering streaming events
1225
+ :param summary_index: Index of the summary part within the reasoning summary
1226
+ :param type: Event type identifier, always "response.reasoning_summary_part.done"
1227
+ """
1228
+
1229
+ item_id: str
1230
+ output_index: int
1231
+ part: OpenAIResponseContentPartReasoningSummary
1232
+ sequence_number: int
1233
+ summary_index: int
1234
+ type: Literal["response.reasoning_summary_part.done"] = "response.reasoning_summary_part.done"
1235
+
1236
+
1237
+ @json_schema_type
1238
+ class OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta(BaseModel):
1239
+ """Streaming event for incremental reasoning summary text updates.
1240
+
1241
+ :param delta: Incremental summary text being added
1242
+ :param item_id: Unique identifier of the output item
1243
+ :param output_index: Index position of the output item
1244
+ :param sequence_number: Sequential number for ordering streaming events
1245
+ :param summary_index: Index of the summary part within the reasoning summary
1246
+ :param type: Event type identifier, always "response.reasoning_summary_text.delta"
1247
+ """
1248
+
1249
+ delta: str
1250
+ item_id: str
1251
+ output_index: int
1252
+ sequence_number: int
1253
+ summary_index: int
1254
+ type: Literal["response.reasoning_summary_text.delta"] = "response.reasoning_summary_text.delta"
1255
+
1256
+
1257
+ @json_schema_type
1258
+ class OpenAIResponseObjectStreamResponseReasoningSummaryTextDone(BaseModel):
1259
+ """Streaming event for when reasoning summary text is completed.
1260
+
1261
+ :param text: Final complete summary text
1262
+ :param item_id: Unique identifier of the output item
1263
+ :param output_index: Index position of the output item
1264
+ :param sequence_number: Sequential number for ordering streaming events
1265
+ :param summary_index: Index of the summary part within the reasoning summary
1266
+ :param type: Event type identifier, always "response.reasoning_summary_text.done"
1267
+ """
1268
+
1269
+ text: str
1270
+ item_id: str
1271
+ output_index: int
1272
+ sequence_number: int
1273
+ summary_index: int
1274
+ type: Literal["response.reasoning_summary_text.done"] = "response.reasoning_summary_text.done"
1275
+
1276
+
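Taken together, these four events stream a reasoning summary part by part. A minimal consumer sketch that folds the text events into accumulated summaries keyed by (item_id, summary_index); the accumulator and handler below are illustrative, not part of this package:

from collections import defaultdict

# Hypothetical accumulator: summary text per (item_id, summary_index).
summaries: dict[tuple[str, int], str] = defaultdict(str)

def handle_summary_event(event) -> None:
    key = (event.item_id, event.summary_index)
    if event.type == "response.reasoning_summary_text.delta":
        summaries[key] += event.delta  # append incremental text
    elif event.type == "response.reasoning_summary_text.done":
        summaries[key] = event.text  # the done event carries the final text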
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseRefusalDelta(BaseModel):
+     """Streaming event for incremental refusal text updates.
+
+     :param content_index: Index position of the content part
+     :param delta: Incremental refusal text being added
+     :param item_id: Unique identifier of the output item
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.refusal.delta"
+     """
+
+     content_index: int
+     delta: str
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.refusal.delta"] = "response.refusal.delta"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseRefusalDone(BaseModel):
+     """Streaming event for when refusal text is completed.
+
+     :param content_index: Index position of the content part
+     :param refusal: Final complete refusal text
+     :param item_id: Unique identifier of the output item
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.refusal.done"
+     """
+
+     content_index: int
+     refusal: str
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.refusal.done"] = "response.refusal.done"
+
+
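Refusal events follow the same delta/done pattern. A sketch of streaming refusal text to a terminal; the handler is illustrative, not package code:

def handle_refusal_event(event) -> None:
    if event.type == "response.refusal.delta":
        print(event.delta, end="", flush=True)  # stream partial refusal text
    elif event.type == "response.refusal.done":
        print()  # event.refusal now holds the complete refusal message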
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded(BaseModel):
+     """Streaming event for when an annotation is added to output text.
+
+     :param item_id: Unique identifier of the item to which the annotation is being added
+     :param output_index: Index position of the output item in the response's output array
+     :param content_index: Index position of the content part within the output item
+     :param annotation_index: Index of the annotation within the content part
+     :param annotation: The annotation object being added
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.output_text.annotation.added"
+     """
+
+     item_id: str
+     output_index: int
+     content_index: int
+     annotation_index: int
+     annotation: OpenAIResponseAnnotations
+     sequence_number: int
+     type: Literal["response.output_text.annotation.added"] = "response.output_text.annotation.added"
+
+
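Because each annotation arrives with explicit item, content, and annotation indices, a consumer can reassemble annotations positionally. A hypothetical bookkeeping sketch, assuming events arrive in annotation_index order:

# Annotations per (item_id, content_index), ordered by annotation_index.
annotations: dict[tuple[str, int], list] = {}

def on_annotation_added(event) -> None:
    key = (event.item_id, event.content_index)
    parts = annotations.setdefault(key, [])
    parts.insert(event.annotation_index, event.annotation)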
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseFileSearchCallInProgress(BaseModel):
+     """Streaming event for a file search call that is in progress.
+
+     :param item_id: Unique identifier of the file search call
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.file_search_call.in_progress"
+     """
+
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.file_search_call.in_progress"] = "response.file_search_call.in_progress"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseFileSearchCallSearching(BaseModel):
+     """Streaming event for a file search call that is actively searching.
+
+     :param item_id: Unique identifier of the file search call
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.file_search_call.searching"
+     """
+
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.file_search_call.searching"] = "response.file_search_call.searching"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectStreamResponseFileSearchCallCompleted(BaseModel):
+     """Streaming event for a completed file search call.
+
+     :param item_id: Unique identifier of the completed file search call
+     :param output_index: Index position of the item in the output list
+     :param sequence_number: Sequential number for ordering streaming events
+     :param type: Event type identifier, always "response.file_search_call.completed"
+     """
+
+     item_id: str
+     output_index: int
+     sequence_number: int
+     type: Literal["response.file_search_call.completed"] = "response.file_search_call.completed"
+
+
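These three events describe a file search call's lifecycle (in_progress -> searching -> completed). A minimal status tracker keyed by item_id; the tracker is illustrative, not part of the package:

# Latest lifecycle state per file search call.
file_search_status: dict[str, str] = {}

def on_file_search_event(event) -> None:
    # The trailing segment of the event type is the lifecycle state,
    # e.g. "response.file_search_call.searching" -> "searching".
    file_search_status[event.item_id] = event.type.rsplit(".", 1)[-1]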
+ OpenAIResponseObjectStream = Annotated[
+     OpenAIResponseObjectStreamResponseCreated
+     | OpenAIResponseObjectStreamResponseInProgress
+     | OpenAIResponseObjectStreamResponseOutputItemAdded
+     | OpenAIResponseObjectStreamResponseOutputItemDone
+     | OpenAIResponseObjectStreamResponseOutputTextDelta
+     | OpenAIResponseObjectStreamResponseOutputTextDone
+     | OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta
+     | OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone
+     | OpenAIResponseObjectStreamResponseWebSearchCallInProgress
+     | OpenAIResponseObjectStreamResponseWebSearchCallSearching
+     | OpenAIResponseObjectStreamResponseWebSearchCallCompleted
+     | OpenAIResponseObjectStreamResponseMcpListToolsInProgress
+     | OpenAIResponseObjectStreamResponseMcpListToolsFailed
+     | OpenAIResponseObjectStreamResponseMcpListToolsCompleted
+     | OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta
+     | OpenAIResponseObjectStreamResponseMcpCallArgumentsDone
+     | OpenAIResponseObjectStreamResponseMcpCallInProgress
+     | OpenAIResponseObjectStreamResponseMcpCallFailed
+     | OpenAIResponseObjectStreamResponseMcpCallCompleted
+     | OpenAIResponseObjectStreamResponseContentPartAdded
+     | OpenAIResponseObjectStreamResponseContentPartDone
+     | OpenAIResponseObjectStreamResponseReasoningTextDelta
+     | OpenAIResponseObjectStreamResponseReasoningTextDone
+     | OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded
+     | OpenAIResponseObjectStreamResponseReasoningSummaryPartDone
+     | OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta
+     | OpenAIResponseObjectStreamResponseReasoningSummaryTextDone
+     | OpenAIResponseObjectStreamResponseRefusalDelta
+     | OpenAIResponseObjectStreamResponseRefusalDone
+     | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded
+     | OpenAIResponseObjectStreamResponseFileSearchCallInProgress
+     | OpenAIResponseObjectStreamResponseFileSearchCallSearching
+     | OpenAIResponseObjectStreamResponseFileSearchCallCompleted
+     | OpenAIResponseObjectStreamResponseIncomplete
+     | OpenAIResponseObjectStreamResponseFailed
+     | OpenAIResponseObjectStreamResponseCompleted,
+     Field(discriminator="type"),
+ ]
+ register_schema(OpenAIResponseObjectStream, name="OpenAIResponseObjectStream")
+
+
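Since the union discriminates on the type field, a single Pydantic TypeAdapter can deserialize any serialized stream event into the right class. A sketch with an illustrative payload and IDs:

from pydantic import TypeAdapter

adapter = TypeAdapter(OpenAIResponseObjectStream)
event = adapter.validate_python(
    {
        "type": "response.refusal.done",  # discriminator selects the event class
        "content_index": 0,
        "refusal": "I can't help with that.",
        "item_id": "item_123",  # illustrative identifiers
        "output_index": 0,
        "sequence_number": 7,
    }
)
assert isinstance(event, OpenAIResponseObjectStreamResponseRefusalDone)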
+ @json_schema_type
+ class OpenAIResponseInputFunctionToolCallOutput(BaseModel):
+     """The output of a function call, passed back to the model as input.
+
+     :param call_id: Identifier of the function call this output corresponds to
+     :param output: Output of the function call, as a string
+     :param type: Input item type identifier, always "function_call_output"
+     :param id: Unique identifier of this input item, if any
+     :param status: Status of the item, if any
+     """
+
+     call_id: str
+     output: str
+     type: Literal["function_call_output"] = "function_call_output"
+     id: str | None = None
+     status: str | None = None
+
+
+ OpenAIResponseInput = Annotated[
+     # Responses API allows output messages to be passed in as input
+     OpenAIResponseOutput
+     | OpenAIResponseInputFunctionToolCallOutput
+     | OpenAIResponseMCPApprovalResponse
+     | OpenAIResponseMessage,
+     Field(union_mode="left_to_right"),
+ ]
+ register_schema(OpenAIResponseInput, name="OpenAIResponseInput")
+
+
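To hand a tool result back to the model, a caller constructs a function_call_output item whose call_id matches the earlier function call. A sketch with illustrative values:

tool_result = OpenAIResponseInputFunctionToolCallOutput(
    call_id="call_abc123",  # illustrative; must match the model's function_call item
    output='{"temperature_c": 21}',
)
# With union_mode="left_to_right", OpenAIResponseInput resolves an input item by
# trying each union member in order rather than via a discriminator field.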
+ @json_schema_type
+ class ListOpenAIResponseInputItem(BaseModel):
+     """List container for OpenAI response input items.
+
+     :param data: List of input items
+     :param object: Object type identifier, always "list"
+     """
+
+     data: Sequence[OpenAIResponseInput]
+     object: Literal["list"] = "list"
+
+
+ @json_schema_type
+ class OpenAIResponseObjectWithInput(OpenAIResponseObject):
+     """OpenAI response object extended with input context information.
+
+     :param input: List of input items that led to this response
+     """
+
+     input: Sequence[OpenAIResponseInput]
+
+     def to_response_object(self) -> OpenAIResponseObject:
+         """Convert to an OpenAIResponseObject by excluding the input field."""
+         return OpenAIResponseObject(**{k: v for k, v in self.model_dump().items() if k != "input"})
+
+
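to_response_object lets a storage layer keep the input context while API handlers return the bare response. A usage sketch, where stored is assumed to be an OpenAIResponseObjectWithInput loaded from storage:

bare = stored.to_response_object()
assert "input" not in type(bare).model_fields  # the input context is dropped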
+ @json_schema_type
+ class ListOpenAIResponseObject(BaseModel):
+     """Paginated list of OpenAI response objects with navigation metadata.
+
+     :param data: List of response objects with their input context
+     :param has_more: Whether there are more results available beyond this page
+     :param first_id: Identifier of the first item in this page
+     :param last_id: Identifier of the last item in this page
+     :param object: Object type identifier, always "list"
+     """
+
+     data: Sequence[OpenAIResponseObjectWithInput]
+     has_more: bool
+     first_id: str
+     last_id: str
+     object: Literal["list"] = "list"
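A client can walk the paginated list by using last_id as a cursor while has_more is true. A sketch against a hypothetical client.list_responses(after=...) method; the real endpoint and parameter names may differ:

def iter_responses(client):
    after = None
    while True:
        page: ListOpenAIResponseObject = client.list_responses(after=after)
        yield from page.data
        if not page.has_more:
            break
        after = page.last_id  # cursor for the next page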