llama-stack-api 0.4.3__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72) hide show
  1. llama_stack_api/__init__.py +945 -0
  2. llama_stack_api/admin/__init__.py +45 -0
  3. llama_stack_api/admin/api.py +72 -0
  4. llama_stack_api/admin/fastapi_routes.py +117 -0
  5. llama_stack_api/admin/models.py +113 -0
  6. llama_stack_api/agents.py +173 -0
  7. llama_stack_api/batches/__init__.py +40 -0
  8. llama_stack_api/batches/api.py +53 -0
  9. llama_stack_api/batches/fastapi_routes.py +113 -0
  10. llama_stack_api/batches/models.py +78 -0
  11. llama_stack_api/benchmarks/__init__.py +43 -0
  12. llama_stack_api/benchmarks/api.py +39 -0
  13. llama_stack_api/benchmarks/fastapi_routes.py +109 -0
  14. llama_stack_api/benchmarks/models.py +109 -0
  15. llama_stack_api/common/__init__.py +5 -0
  16. llama_stack_api/common/content_types.py +101 -0
  17. llama_stack_api/common/errors.py +95 -0
  18. llama_stack_api/common/job_types.py +38 -0
  19. llama_stack_api/common/responses.py +77 -0
  20. llama_stack_api/common/training_types.py +47 -0
  21. llama_stack_api/common/type_system.py +146 -0
  22. llama_stack_api/connectors.py +146 -0
  23. llama_stack_api/conversations.py +270 -0
  24. llama_stack_api/datasetio.py +55 -0
  25. llama_stack_api/datasets/__init__.py +61 -0
  26. llama_stack_api/datasets/api.py +35 -0
  27. llama_stack_api/datasets/fastapi_routes.py +104 -0
  28. llama_stack_api/datasets/models.py +152 -0
  29. llama_stack_api/datatypes.py +373 -0
  30. llama_stack_api/eval.py +137 -0
  31. llama_stack_api/file_processors/__init__.py +27 -0
  32. llama_stack_api/file_processors/api.py +64 -0
  33. llama_stack_api/file_processors/fastapi_routes.py +78 -0
  34. llama_stack_api/file_processors/models.py +42 -0
  35. llama_stack_api/files/__init__.py +35 -0
  36. llama_stack_api/files/api.py +51 -0
  37. llama_stack_api/files/fastapi_routes.py +124 -0
  38. llama_stack_api/files/models.py +107 -0
  39. llama_stack_api/inference.py +1169 -0
  40. llama_stack_api/inspect_api/__init__.py +37 -0
  41. llama_stack_api/inspect_api/api.py +25 -0
  42. llama_stack_api/inspect_api/fastapi_routes.py +76 -0
  43. llama_stack_api/inspect_api/models.py +28 -0
  44. llama_stack_api/internal/__init__.py +9 -0
  45. llama_stack_api/internal/kvstore.py +28 -0
  46. llama_stack_api/internal/sqlstore.py +81 -0
  47. llama_stack_api/models.py +171 -0
  48. llama_stack_api/openai_responses.py +1468 -0
  49. llama_stack_api/post_training.py +370 -0
  50. llama_stack_api/prompts.py +203 -0
  51. llama_stack_api/providers/__init__.py +33 -0
  52. llama_stack_api/providers/api.py +16 -0
  53. llama_stack_api/providers/fastapi_routes.py +57 -0
  54. llama_stack_api/providers/models.py +24 -0
  55. llama_stack_api/rag_tool.py +168 -0
  56. llama_stack_api/resource.py +37 -0
  57. llama_stack_api/router_utils.py +160 -0
  58. llama_stack_api/safety.py +132 -0
  59. llama_stack_api/schema_utils.py +208 -0
  60. llama_stack_api/scoring.py +93 -0
  61. llama_stack_api/scoring_functions.py +211 -0
  62. llama_stack_api/shields.py +93 -0
  63. llama_stack_api/tools.py +226 -0
  64. llama_stack_api/vector_io.py +941 -0
  65. llama_stack_api/vector_stores.py +53 -0
  66. llama_stack_api/version.py +9 -0
  67. {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.4.4.dist-info}/METADATA +1 -1
  68. llama_stack_api-0.4.4.dist-info/RECORD +70 -0
  69. llama_stack_api-0.4.4.dist-info/top_level.txt +1 -0
  70. llama_stack_api-0.4.3.dist-info/RECORD +0 -4
  71. llama_stack_api-0.4.3.dist-info/top_level.txt +0 -1
  72. {llama_stack_api-0.4.3.dist-info → llama_stack_api-0.4.4.dist-info}/WHEEL +0 -0
@@ -0,0 +1,1468 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from collections.abc import Sequence
8
+ from enum import Enum
9
+ from typing import Annotated, Any, Literal
10
+
11
+ from pydantic import BaseModel, Field, model_validator
12
+ from typing_extensions import TypedDict
13
+
14
+ from llama_stack_api.inference import OpenAITokenLogProb
15
+ from llama_stack_api.schema_utils import json_schema_type, register_schema
16
+ from llama_stack_api.vector_io import SearchRankingOptions as FileSearchRankingOptions
17
+
18
+ # NOTE(ashwin): this file is literally a copy of the OpenAI responses API schema. We should probably
19
+ # take their YAML and generate this file automatically; their OpenAPI YAML spec is publicly available.
20
+
21
+
22
+ @json_schema_type
23
+ class OpenAIResponseError(BaseModel):
24
+ """Error details for failed OpenAI response requests.
25
+
26
+ :param code: Error code identifying the type of failure
27
+ :param message: Human-readable error message describing the failure
28
+ """
29
+
30
+ code: str
31
+ message: str
32
+
33
+
34
+ @json_schema_type
35
+ class OpenAIResponseInputMessageContentText(BaseModel):
36
+ """Text content for input messages in OpenAI response format.
37
+
38
+ :param text: The text content of the input message
39
+ :param type: Content type identifier, always "input_text"
40
+ """
41
+
42
+ text: str
43
+ type: Literal["input_text"] = "input_text"
44
+
45
+
46
+ @json_schema_type
47
+ class OpenAIResponseInputMessageContentImage(BaseModel):
48
+ """Image content for input messages in OpenAI response format.
49
+
50
+ :param detail: Level of detail for image processing, can be "low", "high", or "auto"
51
+ :param type: Content type identifier, always "input_image"
52
+ :param file_id: (Optional) The ID of the file to be sent to the model.
53
+ :param image_url: (Optional) URL of the image content
54
+ """
55
+
56
+ detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto"
57
+ type: Literal["input_image"] = "input_image"
58
+ file_id: str | None = None
59
+ image_url: str | None = None
60
+
61
+
62
+ @json_schema_type
63
+ class OpenAIResponseInputMessageContentFile(BaseModel):
64
+ """File content for input messages in OpenAI response format.
65
+
66
+ :param type: The type of the input item. Always `input_file`.
67
+ :param file_data: The data of the file to be sent to the model.
68
+ :param file_id: (Optional) The ID of the file to be sent to the model.
69
+ :param file_url: The URL of the file to be sent to the model.
70
+ :param filename: The name of the file to be sent to the model.
71
+ """
72
+
73
+ type: Literal["input_file"] = "input_file"
74
+ file_data: str | None = None
75
+ file_id: str | None = None
76
+ file_url: str | None = None
77
+ filename: str | None = None
78
+
79
+ @model_validator(mode="after")
80
+ def validate_file_source(self) -> "OpenAIResponseInputMessageContentFile":
81
+ if not any([self.file_data, self.file_id, self.file_url, self.filename]):
82
+ raise ValueError(
83
+ "At least one of 'file_data', 'file_id', 'file_url', or 'filename' must be provided for file content"
84
+ )
85
+ return self
86
+
87
+
88
+ OpenAIResponseInputMessageContent = Annotated[
89
+ OpenAIResponseInputMessageContentText
90
+ | OpenAIResponseInputMessageContentImage
91
+ | OpenAIResponseInputMessageContentFile,
92
+ Field(discriminator="type"),
93
+ ]
94
+ register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMessageContent")
95
+
96
+
97
+ @json_schema_type
98
+ class OpenAIResponsePrompt(BaseModel):
99
+ """OpenAI compatible Prompt object that is used in OpenAI responses.
100
+
101
+ :param id: Unique identifier of the prompt template
102
+ :param variables: Dictionary of variable names to OpenAIResponseInputMessageContent structure for template substitution. The substitution values can either be strings, or other Response input types
103
+ like images or files.
104
+ :param version: Version number of the prompt to use (defaults to latest if not specified)
105
+ """
106
+
107
+ id: str
108
+ variables: dict[str, OpenAIResponseInputMessageContent] | None = None
109
+ version: str | None = None
110
+
111
+
112
+ @json_schema_type
113
+ class OpenAIResponseAnnotationFileCitation(BaseModel):
114
+ """File citation annotation for referencing specific files in response content.
115
+
116
+ :param type: Annotation type identifier, always "file_citation"
117
+ :param file_id: Unique identifier of the referenced file
118
+ :param filename: Name of the referenced file
119
+ :param index: Position index of the citation within the content
120
+ """
121
+
122
+ type: Literal["file_citation"] = "file_citation"
123
+ file_id: str
124
+ filename: str
125
+ index: int
126
+
127
+
128
+ @json_schema_type
129
+ class OpenAIResponseAnnotationCitation(BaseModel):
130
+ """URL citation annotation for referencing external web resources.
131
+
132
+ :param type: Annotation type identifier, always "url_citation"
133
+ :param end_index: End position of the citation span in the content
134
+ :param start_index: Start position of the citation span in the content
135
+ :param title: Title of the referenced web resource
136
+ :param url: URL of the referenced web resource
137
+ """
138
+
139
+ type: Literal["url_citation"] = "url_citation"
140
+ end_index: int
141
+ start_index: int
142
+ title: str
143
+ url: str
144
+
145
+
146
+ @json_schema_type
147
+ class OpenAIResponseAnnotationContainerFileCitation(BaseModel):
+ """Citation annotation referencing a span of content backed by a file inside a container.
+ 
+ :param type: Annotation type identifier, always "container_file_citation"
+ :param container_id: Identifier of the container holding the referenced file
+ :param end_index: End position of the citation span in the content
+ :param file_id: Unique identifier of the referenced file
+ :param filename: Name of the referenced file
+ :param start_index: Start position of the citation span in the content
+ """
148
+ type: Literal["container_file_citation"] = "container_file_citation"
149
+ container_id: str
150
+ end_index: int
151
+ file_id: str
152
+ filename: str
153
+ start_index: int
154
+
155
+
156
+ @json_schema_type
157
+ class OpenAIResponseAnnotationFilePath(BaseModel):
+ """File path annotation referencing a file by its identifier.
+ 
+ :param type: Annotation type identifier, always "file_path"
+ :param file_id: Unique identifier of the referenced file
+ :param index: Position index of the annotation within the content
+ """
158
+ type: Literal["file_path"] = "file_path"
159
+ file_id: str
160
+ index: int
161
+
162
+
163
+ OpenAIResponseAnnotations = Annotated[
164
+ OpenAIResponseAnnotationFileCitation
165
+ | OpenAIResponseAnnotationCitation
166
+ | OpenAIResponseAnnotationContainerFileCitation
167
+ | OpenAIResponseAnnotationFilePath,
168
+ Field(discriminator="type"),
169
+ ]
170
+ register_schema(OpenAIResponseAnnotations, name="OpenAIResponseAnnotations")
171
+
172
+
173
+ @json_schema_type
174
+ class OpenAIResponseOutputMessageContentOutputText(BaseModel):
+ """Text content emitted by the model within an output message.
+ 
+ :param text: The text emitted by the model
+ :param type: Content type identifier, always "output_text"
+ :param annotations: Annotations (file/URL citations, file paths) attached to this text
+ :param logprobs: (Optional) Token log probabilities for the generated text
+ """
175
+ text: str
176
+ type: Literal["output_text"] = "output_text"
177
+ annotations: list[OpenAIResponseAnnotations] = Field(default_factory=list)
178
+ logprobs: list[OpenAITokenLogProb] | None = None
179
+
180
+
181
+ @json_schema_type
182
+ class OpenAIResponseContentPartRefusal(BaseModel):
183
+ """Refusal content within a streamed response part.
184
+
185
+ :param type: Content part type identifier, always "refusal"
186
+ :param refusal: Refusal text supplied by the model
187
+ """
188
+
189
+ type: Literal["refusal"] = "refusal"
190
+ refusal: str
191
+
192
+
193
+ OpenAIResponseOutputMessageContent = Annotated[
194
+ OpenAIResponseOutputMessageContentOutputText | OpenAIResponseContentPartRefusal,
195
+ Field(discriminator="type"),
196
+ ]
197
+ register_schema(OpenAIResponseOutputMessageContent, name="OpenAIResponseOutputMessageContent")
198
+
199
+
200
+ @json_schema_type
201
+ class OpenAIResponseMessage(BaseModel):
202
+ """
203
+ Corresponds to the various Message types in the Responses API.
204
+ They are all under one type because the Responses API gives them all
205
+ the same "type" value, and there is no way to tell them apart in certain
206
+ scenarios.
207
+ """
208
+
209
+ content: str | Sequence[OpenAIResponseInputMessageContent] | Sequence[OpenAIResponseOutputMessageContent]
210
+ role: Literal["system"] | Literal["developer"] | Literal["user"] | Literal["assistant"]
211
+ type: Literal["message"] = "message"
212
+
213
+ # The fields below are not used in all scenarios, but are required in others.
214
+ id: str | None = None
215
+ status: str | None = None
216
+
217
+
218
+ @json_schema_type
219
+ class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
220
+ """Web search tool call output message for OpenAI responses.
221
+
222
+ :param id: Unique identifier for this tool call
223
+ :param status: Current status of the web search operation
224
+ :param type: Tool call type identifier, always "web_search_call"
225
+ """
226
+
227
+ id: str
228
+ status: str
229
+ type: Literal["web_search_call"] = "web_search_call"
230
+
231
+
232
+ class OpenAIResponseOutputMessageFileSearchToolCallResults(BaseModel):
233
+ """Search results returned by the file search operation.
234
+
235
+ :param attributes: (Optional) Key-value attributes associated with the file
236
+ :param file_id: Unique identifier of the file containing the result
237
+ :param filename: Name of the file containing the result
238
+ :param score: Relevance score for this search result (between 0 and 1)
239
+ :param text: Text content of the search result
240
+ """
241
+
242
+ attributes: dict[str, Any]
243
+ file_id: str
244
+ filename: str
245
+ score: float
246
+ text: str
247
+
248
+
249
+ @json_schema_type
250
+ class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
251
+ """File search tool call output message for OpenAI responses.
252
+
253
+ :param id: Unique identifier for this tool call
254
+ :param queries: List of search queries executed
255
+ :param status: Current status of the file search operation
256
+ :param type: Tool call type identifier, always "file_search_call"
257
+ :param results: (Optional) Search results returned by the file search operation
258
+ """
259
+
260
+ id: str
261
+ queries: Sequence[str]
262
+ status: str
263
+ type: Literal["file_search_call"] = "file_search_call"
264
+ results: Sequence[OpenAIResponseOutputMessageFileSearchToolCallResults] | None = None
265
+
266
+
267
+ @json_schema_type
268
+ class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
269
+ """Function tool call output message for OpenAI responses.
270
+
271
+ :param call_id: Unique identifier for the function call
272
+ :param name: Name of the function being called
273
+ :param arguments: JSON string containing the function arguments
274
+ :param type: Tool call type identifier, always "function_call"
275
+ :param id: (Optional) Additional identifier for the tool call
276
+ :param status: (Optional) Current status of the function call execution
277
+ """
278
+
279
+ call_id: str
280
+ name: str
281
+ arguments: str
282
+ type: Literal["function_call"] = "function_call"
283
+ id: str | None = None
284
+ status: str | None = None
285
+
286
+
287
+ @json_schema_type
288
+ class OpenAIResponseOutputMessageMCPCall(BaseModel):
289
+ """Model Context Protocol (MCP) call output message for OpenAI responses.
290
+
291
+ :param id: Unique identifier for this MCP call
292
+ :param type: Tool call type identifier, always "mcp_call"
293
+ :param arguments: JSON string containing the MCP call arguments
294
+ :param name: Name of the MCP method being called
295
+ :param server_label: Label identifying the MCP server handling the call
296
+ :param error: (Optional) Error message if the MCP call failed
297
+ :param output: (Optional) Output result from the successful MCP call
298
+ """
299
+
300
+ id: str
301
+ type: Literal["mcp_call"] = "mcp_call"
302
+ arguments: str
303
+ name: str
304
+ server_label: str
305
+ error: str | None = None
306
+ output: str | None = None
307
+
308
+
309
+ class MCPListToolsTool(BaseModel):
310
+ """Tool definition returned by MCP list tools operation.
311
+
312
+ :param input_schema: JSON schema defining the tool's input parameters
313
+ :param name: Name of the tool
314
+ :param description: (Optional) Description of what the tool does
315
+ """
316
+
317
+ input_schema: dict[str, Any]
318
+ name: str
319
+ description: str | None = None
320
+
321
+
322
+ @json_schema_type
323
+ class OpenAIResponseOutputMessageMCPListTools(BaseModel):
324
+ """MCP list tools output message containing available tools from an MCP server.
325
+
326
+ :param id: Unique identifier for this MCP list tools operation
327
+ :param type: Tool call type identifier, always "mcp_list_tools"
328
+ :param server_label: Label identifying the MCP server providing the tools
329
+ :param tools: List of available tools provided by the MCP server
330
+ """
331
+
332
+ id: str
333
+ type: Literal["mcp_list_tools"] = "mcp_list_tools"
334
+ server_label: str
335
+ tools: list[MCPListToolsTool]
336
+
337
+
338
+ @json_schema_type
339
+ class OpenAIResponseMCPApprovalRequest(BaseModel):
340
+ """
341
+ A request for human approval of a tool invocation.
+ 
+ :param arguments: JSON string containing the arguments of the pending tool call
+ :param id: Unique identifier for this approval request
+ :param name: Name of the tool awaiting approval
+ :param server_label: Label identifying the MCP server that would handle the call
+ :param type: Object type identifier, always "mcp_approval_request"
342
+ """
343
+
344
+ arguments: str
345
+ id: str
346
+ name: str
347
+ server_label: str
348
+ type: Literal["mcp_approval_request"] = "mcp_approval_request"
349
+
350
+
351
+ @json_schema_type
352
+ class OpenAIResponseMCPApprovalResponse(BaseModel):
353
+ """
354
+ A response to an MCP approval request.
+ 
+ :param approval_request_id: Identifier of the approval request being answered
+ :param approve: Whether the tool invocation is approved
+ :param type: Object type identifier, always "mcp_approval_response"
+ :param id: (Optional) Unique identifier for this approval response
+ :param reason: (Optional) Reason for the approval decision
355
+ """
356
+
357
+ approval_request_id: str
358
+ approve: bool
359
+ type: Literal["mcp_approval_response"] = "mcp_approval_response"
360
+ id: str | None = None
361
+ reason: str | None = None
362
+
363
+
364
+ OpenAIResponseOutput = Annotated[
365
+ OpenAIResponseMessage
366
+ | OpenAIResponseOutputMessageWebSearchToolCall
367
+ | OpenAIResponseOutputMessageFileSearchToolCall
368
+ | OpenAIResponseOutputMessageFunctionToolCall
369
+ | OpenAIResponseOutputMessageMCPCall
370
+ | OpenAIResponseOutputMessageMCPListTools
371
+ | OpenAIResponseMCPApprovalRequest,
372
+ Field(discriminator="type"),
373
+ ]
374
+ register_schema(OpenAIResponseOutput, name="OpenAIResponseOutput")
375
+
376
+
377
+ # This has to be a TypedDict because we need a "schema" field and our strong
378
+ # typing code in the schema generator doesn't support Pydantic aliases. That also
379
+ # means we can't use a discriminator field here, because TypedDicts don't support
380
+ # default values which the strong typing code requires for discriminators.
381
+ class OpenAIResponseTextFormat(TypedDict, total=False):
382
+ """Configuration for Responses API text format.
383
+
384
+ :param type: Must be "text", "json_schema", or "json_object" to identify the format type
385
+ :param name: The name of the response format. Only used for json_schema.
386
+ :param schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model. Only used for json_schema.
387
+ :param description: (Optional) A description of the response format. Only used for json_schema.
388
+ :param strict: (Optional) Whether to strictly enforce the JSON schema. If true, the response must match the schema exactly. Only used for json_schema.
389
+ """
390
+
391
+ type: Literal["text"] | Literal["json_schema"] | Literal["json_object"]
392
+ name: str | None
393
+ schema: dict[str, Any] | None
394
+ description: str | None
395
+ strict: bool | None
396
+
397
+
398
+ @json_schema_type
399
+ class OpenAIResponseText(BaseModel):
400
+ """Text response configuration for OpenAI responses.
401
+
402
+ :param format: (Optional) Text format configuration specifying output format requirements
403
+ """
404
+
405
+ format: OpenAIResponseTextFormat | None = None
406
+
407
+
408
+ # Must match type Literals of OpenAIResponseInputToolWebSearch below
409
+ WebSearchToolTypes = ["web_search", "web_search_preview", "web_search_preview_2025_03_11", "web_search_2025_08_26"]
410
+
411
+
412
+ @json_schema_type
413
+ class OpenAIResponseInputToolWebSearch(BaseModel):
414
+ """Web search tool configuration for OpenAI response inputs.
415
+
416
+ :param type: Web search tool type variant to use
417
+ :param search_context_size: (Optional) Size of search context, must be "low", "medium", or "high"
418
+ """
419
+
420
+ # Must match values of WebSearchToolTypes above
421
+ type: (
422
+ Literal["web_search"]
423
+ | Literal["web_search_preview"]
424
+ | Literal["web_search_preview_2025_03_11"]
425
+ | Literal["web_search_2025_08_26"]
426
+ ) = "web_search"
427
+ # TODO: actually use search_context_size somewhere...
428
+ search_context_size: str | None = Field(default="medium", pattern="^low|medium|high$")
429
+ # TODO: add user_location
430
+
431
+
432
+ @json_schema_type
433
+ class OpenAIResponseInputToolFunction(BaseModel):
434
+ """Function tool configuration for OpenAI response inputs.
435
+
436
+ :param type: Tool type identifier, always "function"
437
+ :param name: Name of the function that can be called
438
+ :param description: (Optional) Description of what the function does
439
+ :param parameters: (Optional) JSON schema defining the function's parameters
440
+ :param strict: (Optional) Whether to enforce strict parameter validation
441
+ """
442
+
443
+ type: Literal["function"] = "function"
444
+ name: str
445
+ description: str | None = None
446
+ parameters: dict[str, Any] | None
447
+ strict: bool | None = None
448
+
449
+
450
+ @json_schema_type
451
+ class OpenAIResponseInputToolFileSearch(BaseModel):
452
+ """File search tool configuration for OpenAI response inputs.
453
+
454
+ :param type: Tool type identifier, always "file_search"
455
+ :param vector_store_ids: List of vector store identifiers to search within
456
+ :param filters: (Optional) Additional filters to apply to the search
457
+ :param max_num_results: (Optional) Maximum number of search results to return (1-50)
458
+ :param ranking_options: (Optional) Options for ranking and scoring search results
459
+ """
460
+
461
+ type: Literal["file_search"] = "file_search"
462
+ vector_store_ids: list[str]
463
+ filters: dict[str, Any] | None = None
464
+ max_num_results: int | None = Field(default=10, ge=1, le=50)
465
+ ranking_options: FileSearchRankingOptions | None = None
466
+
467
+
468
+ class ApprovalFilter(BaseModel):
469
+ """Filter configuration for MCP tool approval requirements.
470
+
471
+ :param always: (Optional) List of tool names that always require approval
472
+ :param never: (Optional) List of tool names that never require approval
473
+ """
474
+
475
+ always: list[str] | None = None
476
+ never: list[str] | None = None
477
+
478
+
479
+ class AllowedToolsFilter(BaseModel):
480
+ """Filter configuration for restricting which MCP tools can be used.
481
+
482
+ :param tool_names: (Optional) List of specific tool names that are allowed
483
+ """
484
+
485
+ tool_names: list[str] | None = None
486
+
487
+
488
+ @json_schema_type
489
+ class OpenAIResponseInputToolMCP(BaseModel):
490
+ """Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
491
+
492
+ :param type: Tool type identifier, always "mcp"
493
+ :param server_label: Label to identify this MCP server
494
+ :param server_url: URL endpoint of the MCP server
495
+ :param headers: (Optional) HTTP headers to include when connecting to the server
496
+ :param authorization: (Optional) OAuth access token for authenticating with the MCP server
497
+ :param require_approval: Approval requirement for tool calls ("always", "never", or filter)
498
+ :param allowed_tools: (Optional) Restriction on which tools can be used from this server
499
+ """
500
+
501
+ type: Literal["mcp"] = "mcp"
502
+ server_label: str
503
+ server_url: str
504
+ headers: dict[str, Any] | None = None
505
+ authorization: str | None = Field(default=None, exclude=True)
506
+
507
+ require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
508
+ allowed_tools: list[str] | AllowedToolsFilter | None = None
509
+
510
+
511
+ OpenAIResponseInputTool = Annotated[
512
+ OpenAIResponseInputToolWebSearch
513
+ | OpenAIResponseInputToolFileSearch
514
+ | OpenAIResponseInputToolFunction
515
+ | OpenAIResponseInputToolMCP,
516
+ Field(discriminator="type"),
517
+ ]
518
+ register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
519
+
520
+
521
+ @json_schema_type
522
+ class OpenAIResponseToolMCP(BaseModel):
523
+ """Model Context Protocol (MCP) tool configuration for OpenAI response object.
524
+
525
+ :param type: Tool type identifier, always "mcp"
526
+ :param server_label: Label to identify this MCP server
527
+ :param allowed_tools: (Optional) Restriction on which tools can be used from this server
528
+ """
529
+
530
+ type: Literal["mcp"] = "mcp"
531
+ server_label: str
532
+ allowed_tools: list[str] | AllowedToolsFilter | None = None
533
+
534
+
535
+ OpenAIResponseTool = Annotated[
536
+ OpenAIResponseInputToolWebSearch
537
+ | OpenAIResponseInputToolFileSearch
538
+ | OpenAIResponseInputToolFunction
539
+ | OpenAIResponseToolMCP, # The only type that differs from that in the inputs is the MCP tool
540
+ Field(discriminator="type"),
541
+ ]
542
+ register_schema(OpenAIResponseTool, name="OpenAIResponseTool")
543
+
544
+
545
+ @json_schema_type
546
+ class OpenAIResponseInputToolChoiceAllowedTools(BaseModel):
547
+ """Constrains the tools available to the model to a pre-defined set.
548
+
549
+ :param mode: Constrains the tools available to the model to a pre-defined set
550
+ :param tools: A list of tool definitions that the model should be allowed to call
551
+ :param type: Tool choice type identifier, always "allowed_tools"
552
+ """
553
+
554
+ mode: Literal["auto", "required"] = "auto"
555
+ tools: list[dict[str, str]]
556
+ type: Literal["allowed_tools"] = "allowed_tools"
557
+
558
+
559
+ @json_schema_type
560
+ class OpenAIResponseInputToolChoiceFileSearch(BaseModel):
561
+ """Indicates that the model should use file search to generate a response.
562
+
563
+ :param type: Tool choice type identifier, always "file_search"
564
+ """
565
+
566
+ type: Literal["file_search"] = "file_search"
567
+
568
+
569
+ @json_schema_type
570
+ class OpenAIResponseInputToolChoiceWebSearch(BaseModel):
571
+ """Indicates that the model should use web search to generate a response
572
+
573
+ :param type: Web search tool type variant to use
574
+ """
575
+
576
+ type: (
577
+ Literal["web_search"]
578
+ | Literal["web_search_preview"]
579
+ | Literal["web_search_preview_2025_03_11"]
580
+ | Literal["web_search_2025_08_26"]
581
+ ) = "web_search"
582
+
583
+
584
+ @json_schema_type
585
+ class OpenAIResponseInputToolChoiceFunctionTool(BaseModel):
586
+ """Forces the model to call a specific function.
587
+
588
+ :param name: The name of the function to call
589
+ :param type: Tool choice type identifier, always "function"
590
+ """
591
+
592
+ name: str
593
+ type: Literal["function"] = "function"
594
+
595
+
596
+ @json_schema_type
597
+ class OpenAIResponseInputToolChoiceMCPTool(BaseModel):
598
+ """Forces the model to call a specific tool on a remote MCP server
599
+
600
+ :param server_label: The label of the MCP server to use.
601
+ :param type: Tool choice type identifier, always "mcp"
602
+ :param name: (Optional) The name of the tool to call on the server.
603
+ """
604
+
605
+ server_label: str
606
+ type: Literal["mcp"] = "mcp"
607
+ name: str | None = None
608
+
609
+
610
+ @json_schema_type
611
+ class OpenAIResponseInputToolChoiceCustomTool(BaseModel):
612
+ """Forces the model to call a custom tool.
613
+
614
+ :param type: Tool choice type identifier, always "custom"
615
+ :param name: The name of the custom tool to call.
616
+ """
617
+
618
+ type: Literal["custom"] = "custom"
619
+ name: str
620
+
621
+
622
+ class OpenAIResponseInputToolChoiceMode(str, Enum):
+ """Global tool-choice mode string: "auto", "required", or "none"."""
+ 
623
+ auto = "auto"
624
+ required = "required"
625
+ none = "none"
626
+
627
+
628
+ OpenAIResponseInputToolChoiceObject = Annotated[
629
+ OpenAIResponseInputToolChoiceAllowedTools
630
+ | OpenAIResponseInputToolChoiceFileSearch
631
+ | OpenAIResponseInputToolChoiceWebSearch
632
+ | OpenAIResponseInputToolChoiceFunctionTool
633
+ | OpenAIResponseInputToolChoiceMCPTool
634
+ | OpenAIResponseInputToolChoiceCustomTool,
635
+ Field(discriminator="type"),
636
+ ]
637
+
638
+ # A tool choice is either a global mode string or one of the object forms above.
639
+ OpenAIResponseInputToolChoice = OpenAIResponseInputToolChoiceMode | OpenAIResponseInputToolChoiceObject
640
+
641
+ register_schema(OpenAIResponseInputToolChoice, name="OpenAIResponseInputToolChoice")
642
+
643
+
644
+ class OpenAIResponseUsageOutputTokensDetails(BaseModel):
645
+ """Token details for output tokens in OpenAI response usage.
646
+
647
+ :param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)
648
+ """
649
+
650
+ reasoning_tokens: int | None = None
651
+
652
+
653
+ class OpenAIResponseUsageInputTokensDetails(BaseModel):
654
+ """Token details for input tokens in OpenAI response usage.
655
+
656
+ :param cached_tokens: Number of tokens retrieved from cache
657
+ """
658
+
659
+ cached_tokens: int | None = None
660
+
661
+
662
+ @json_schema_type
663
+ class OpenAIResponseUsage(BaseModel):
664
+ """Usage information for OpenAI response.
665
+
666
+ :param input_tokens: Number of tokens in the input
667
+ :param output_tokens: Number of tokens in the output
668
+ :param total_tokens: Total tokens used (input + output)
669
+ :param input_tokens_details: Detailed breakdown of input token usage
670
+ :param output_tokens_details: Detailed breakdown of output token usage
671
+ """
672
+
673
+ input_tokens: int
674
+ output_tokens: int
675
+ total_tokens: int
676
+ input_tokens_details: OpenAIResponseUsageInputTokensDetails | None = None
677
+ output_tokens_details: OpenAIResponseUsageOutputTokensDetails | None = None
678
+
679
+
680
+ @json_schema_type
681
+ class OpenAIResponseObject(BaseModel):
682
+ """Complete OpenAI response object containing generation results and metadata.
683
+
684
+ :param created_at: Unix timestamp when the response was created
685
+ :param error: (Optional) Error details if the response generation failed
686
+ :param id: Unique identifier for this response
687
+ :param model: Model identifier used for generation
688
+ :param object: Object type identifier, always "response"
689
+ :param output: List of generated output items (messages, tool calls, etc.)
690
+ :param parallel_tool_calls: (Optional) Whether to allow more than one function tool call generated per turn.
691
+ :param previous_response_id: (Optional) ID of the previous response in a conversation
692
+ :param prompt: (Optional) Reference to a prompt template and its variables.
693
+ :param status: Current status of the response generation
694
+ :param temperature: (Optional) Sampling temperature used for generation
695
+ :param text: Text formatting configuration for the response
696
+ :param top_p: (Optional) Nucleus sampling parameter used for generation
697
+ :param tools: (Optional) An array of tools the model may call while generating a response.
698
+ :param tool_choice: (Optional) Tool choice configuration for the response.
699
+ :param truncation: (Optional) Truncation strategy applied to the response
700
+ :param usage: (Optional) Token usage information for the response
701
+ :param instructions: (Optional) System message inserted into the model's context
702
+ :param max_tool_calls: (Optional) Max number of total calls to built-in tools that can be processed in a response
703
+ :param metadata: (Optional) Dictionary of metadata key-value pairs
704
+ """
705
+
706
+ created_at: int
707
+ error: OpenAIResponseError | None = None
708
+ id: str
709
+ model: str
710
+ object: Literal["response"] = "response"
711
+ output: Sequence[OpenAIResponseOutput]
712
+ parallel_tool_calls: bool | None = True
713
+ previous_response_id: str | None = None
714
+ prompt: OpenAIResponsePrompt | None = None
715
+ status: str
716
+ temperature: float | None = None
717
+ # Default to text format to avoid breaking the loading of old responses
718
+ # before the field was added. New responses will have this set always.
719
+ text: OpenAIResponseText = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text"))
720
+ top_p: float | None = None
721
+ tools: Sequence[OpenAIResponseTool] | None = None
722
+ tool_choice: OpenAIResponseInputToolChoice | None = None
723
+ truncation: str | None = None
724
+ usage: OpenAIResponseUsage | None = None
725
+ instructions: str | None = None
726
+ max_tool_calls: int | None = None
727
+ metadata: dict[str, str] | None = None
728
+
729
+
730
@json_schema_type
class OpenAIDeleteResponseObject(BaseModel):
    """Response object confirming deletion of an OpenAI response.

    :param id: Unique identifier of the deleted response
    :param object: Object type identifier, always "response"
    :param deleted: Deletion confirmation flag, always True
    """

    id: str
    object: Literal["response"] = "response"
    deleted: bool = True
742
+
743
+
744
@json_schema_type
class OpenAIResponseObjectStreamResponseCreated(BaseModel):
    """Streaming event indicating a new response has been created.

    :param response: The response object that was created
    :param type: Event type identifier, always "response.created"
    """

    response: OpenAIResponseObject
    type: Literal["response.created"] = "response.created"
754
+
755
+
756
@json_schema_type
class OpenAIResponseObjectStreamResponseInProgress(BaseModel):
    """Streaming event indicating the response remains in progress.

    :param response: Current response state while in progress
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.in_progress"
    """

    response: OpenAIResponseObject
    sequence_number: int
    type: Literal["response.in_progress"] = "response.in_progress"
768
+
769
+
770
@json_schema_type
class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
    """Streaming event indicating a response has been completed.

    :param response: Completed response object
    :param type: Event type identifier, always "response.completed"
    """

    response: OpenAIResponseObject
    type: Literal["response.completed"] = "response.completed"
780
+
781
+
782
@json_schema_type
class OpenAIResponseObjectStreamResponseIncomplete(BaseModel):
    """Streaming event emitted when a response ends in an incomplete state.

    :param response: Response object describing the incomplete state
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.incomplete"
    """

    response: OpenAIResponseObject
    sequence_number: int
    type: Literal["response.incomplete"] = "response.incomplete"
794
+
795
+
796
@json_schema_type
class OpenAIResponseObjectStreamResponseFailed(BaseModel):
    """Streaming event emitted when a response fails.

    :param response: Response object describing the failure
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.failed"
    """

    response: OpenAIResponseObject
    sequence_number: int
    type: Literal["response.failed"] = "response.failed"
808
+
809
+
810
@json_schema_type
class OpenAIResponseObjectStreamResponseOutputItemAdded(BaseModel):
    """Streaming event for when a new output item is added to the response.

    :param response_id: Unique identifier of the response containing this output
    :param item: The output item that was added (message, tool call, etc.)
    :param output_index: Index position of this item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.output_item.added"
    """

    response_id: str
    item: OpenAIResponseOutput
    output_index: int
    sequence_number: int
    type: Literal["response.output_item.added"] = "response.output_item.added"
826
+
827
+
828
@json_schema_type
class OpenAIResponseObjectStreamResponseOutputItemDone(BaseModel):
    """Streaming event for when an output item is completed.

    :param response_id: Unique identifier of the response containing this output
    :param item: The completed output item (message, tool call, etc.)
    :param output_index: Index position of this item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.output_item.done"
    """

    response_id: str
    item: OpenAIResponseOutput
    output_index: int
    sequence_number: int
    type: Literal["response.output_item.done"] = "response.output_item.done"
844
+
845
+
846
@json_schema_type
class OpenAIResponseObjectStreamResponseOutputTextDelta(BaseModel):
    """Streaming event for incremental text content updates.

    :param content_index: Index position within the text content
    :param delta: Incremental text content being added
    :param item_id: Unique identifier of the output item being updated
    :param logprobs: (Optional) Token log probability details
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.output_text.delta"
    """

    content_index: int
    delta: str
    item_id: str
    logprobs: list[OpenAITokenLogProb] | None = None
    output_index: int
    sequence_number: int
    type: Literal["response.output_text.delta"] = "response.output_text.delta"
866
+
867
+
868
@json_schema_type
class OpenAIResponseObjectStreamResponseOutputTextDone(BaseModel):
    """Streaming event for when text output is completed.

    :param content_index: Index position within the text content
    :param text: Final complete text content of the output item
    :param item_id: Unique identifier of the completed output item
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.output_text.done"
    """

    content_index: int
    text: str  # final text of the output item
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.output_text.done"] = "response.output_text.done"
886
+
887
+
888
@json_schema_type
class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta(BaseModel):
    """Streaming event for incremental function call argument updates.

    :param delta: Incremental function call arguments being added
    :param item_id: Unique identifier of the function call being updated
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.function_call_arguments.delta"
    """

    delta: str
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.function_call_arguments.delta"] = "response.function_call_arguments.delta"
904
+
905
+
906
@json_schema_type
class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone(BaseModel):
    """Streaming event for when function call arguments are completed.

    :param arguments: Final complete arguments JSON string for the function call
    :param item_id: Unique identifier of the completed function call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.function_call_arguments.done"
    """

    arguments: str  # final arguments of the function call
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.function_call_arguments.done"] = "response.function_call_arguments.done"
922
+
923
+
924
@json_schema_type
class OpenAIResponseObjectStreamResponseWebSearchCallInProgress(BaseModel):
    """Streaming event for web search calls in progress.

    :param item_id: Unique identifier of the web search call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.web_search_call.in_progress"
    """

    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.web_search_call.in_progress"] = "response.web_search_call.in_progress"
938
+
939
+
940
@json_schema_type
class OpenAIResponseObjectStreamResponseWebSearchCallSearching(BaseModel):
    """Streaming event for web search calls actively searching.

    :param item_id: Unique identifier of the web search call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.web_search_call.searching"
    """

    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.web_search_call.searching"] = "response.web_search_call.searching"
946
+
947
+
948
@json_schema_type
class OpenAIResponseObjectStreamResponseWebSearchCallCompleted(BaseModel):
    """Streaming event for completed web search calls.

    :param item_id: Unique identifier of the completed web search call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.web_search_call.completed"
    """

    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.web_search_call.completed"] = "response.web_search_call.completed"
962
+
963
+
964
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpListToolsInProgress(BaseModel):
    """Streaming event for MCP tool listing in progress.

    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_list_tools.in_progress"
    """

    sequence_number: int
    type: Literal["response.mcp_list_tools.in_progress"] = "response.mcp_list_tools.in_progress"
968
+
969
+
970
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpListToolsFailed(BaseModel):
    """Streaming event for failed MCP tool listing.

    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_list_tools.failed"
    """

    sequence_number: int
    type: Literal["response.mcp_list_tools.failed"] = "response.mcp_list_tools.failed"
974
+
975
+
976
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpListToolsCompleted(BaseModel):
    """Streaming event for completed MCP tool listing.

    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_list_tools.completed"
    """

    sequence_number: int
    type: Literal["response.mcp_list_tools.completed"] = "response.mcp_list_tools.completed"
980
+
981
+
982
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta(BaseModel):
    """Streaming event for incremental MCP call argument updates.

    :param delta: Incremental MCP call arguments being added
    :param item_id: Unique identifier of the MCP call being updated
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_call.arguments.delta"
    """

    delta: str
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.mcp_call.arguments.delta"] = "response.mcp_call.arguments.delta"
989
+
990
+
991
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpCallArgumentsDone(BaseModel):
    """Streaming event for when MCP call arguments are completed.

    :param arguments: Final complete arguments JSON string for the MCP call
    :param item_id: Unique identifier of the completed MCP call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_call.arguments.done"
    """

    arguments: str  # final arguments of the MCP call
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.mcp_call.arguments.done"] = "response.mcp_call.arguments.done"
998
+
999
+
1000
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpCallInProgress(BaseModel):
    """Streaming event for MCP calls in progress.

    :param item_id: Unique identifier of the MCP call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_call.in_progress"
    """

    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.mcp_call.in_progress"] = "response.mcp_call.in_progress"
1014
+
1015
+
1016
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpCallFailed(BaseModel):
    """Streaming event for failed MCP calls.

    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_call.failed"
    """

    sequence_number: int
    type: Literal["response.mcp_call.failed"] = "response.mcp_call.failed"
1026
+
1027
+
1028
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpCallCompleted(BaseModel):
    """Streaming event for completed MCP calls.

    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_call.completed"
    """

    sequence_number: int
    type: Literal["response.mcp_call.completed"] = "response.mcp_call.completed"
1038
+
1039
+
1040
@json_schema_type
class OpenAIResponseContentPartOutputText(BaseModel):
    """Text content within a streamed response part.

    :param type: Content part type identifier, always "output_text"
    :param text: Text emitted for this content part
    :param annotations: Structured annotations associated with the text
    :param logprobs: (Optional) Token log probability details
    """

    type: Literal["output_text"] = "output_text"
    text: str
    annotations: list[OpenAIResponseAnnotations] = Field(default_factory=list)
    logprobs: list[OpenAITokenLogProb] | None = None
1054
+
1055
+
1056
@json_schema_type
class OpenAIResponseContentPartReasoningText(BaseModel):
    """Reasoning text emitted as part of a streamed response.

    :param type: Content part type identifier, always "reasoning_text"
    :param text: Reasoning text supplied by the model
    """

    type: Literal["reasoning_text"] = "reasoning_text"
    text: str
1066
+
1067
+
1068
# Discriminated union of all content part variants that can appear in a
# streamed response; Pydantic selects the member via the "type" field.
OpenAIResponseContentPart = Annotated[
    OpenAIResponseContentPartOutputText | OpenAIResponseContentPartRefusal | OpenAIResponseContentPartReasoningText,
    Field(discriminator="type"),
]
register_schema(OpenAIResponseContentPart, name="OpenAIResponseContentPart")
1073
+
1074
+
1075
@json_schema_type
class OpenAIResponseObjectStreamResponseContentPartAdded(BaseModel):
    """Streaming event for when a new content part is added to a response item.

    :param content_index: Index position of the part within the content array
    :param response_id: Unique identifier of the response containing this content
    :param item_id: Unique identifier of the output item containing this content part
    :param output_index: Index position of the output item in the response
    :param part: The content part that was added
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.content_part.added"
    """

    content_index: int
    response_id: str
    item_id: str
    output_index: int
    part: OpenAIResponseContentPart
    sequence_number: int
    type: Literal["response.content_part.added"] = "response.content_part.added"
1095
+
1096
+
1097
@json_schema_type
class OpenAIResponseObjectStreamResponseContentPartDone(BaseModel):
    """Streaming event for when a content part is completed.

    :param content_index: Index position of the part within the content array
    :param response_id: Unique identifier of the response containing this content
    :param item_id: Unique identifier of the output item containing this content part
    :param output_index: Index position of the output item in the response
    :param part: The completed content part
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.content_part.done"
    """

    content_index: int
    response_id: str
    item_id: str
    output_index: int
    part: OpenAIResponseContentPart
    sequence_number: int
    type: Literal["response.content_part.done"] = "response.content_part.done"
1117
+
1118
+
1119
@json_schema_type
class OpenAIResponseObjectStreamResponseReasoningTextDelta(BaseModel):
    """Streaming event for incremental reasoning text updates.

    :param content_index: Index position of the reasoning content part
    :param delta: Incremental reasoning text being added
    :param item_id: Unique identifier of the output item being updated
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.reasoning_text.delta"
    """

    content_index: int
    delta: str
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.reasoning_text.delta"] = "response.reasoning_text.delta"
1137
+
1138
+
1139
@json_schema_type
class OpenAIResponseObjectStreamResponseReasoningTextDone(BaseModel):
    """Streaming event for when reasoning text is completed.

    :param content_index: Index position of the reasoning content part
    :param text: Final complete reasoning text
    :param item_id: Unique identifier of the completed output item
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.reasoning_text.done"
    """

    content_index: int
    text: str
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.reasoning_text.done"] = "response.reasoning_text.done"
1157
+
1158
+
1159
@json_schema_type
class OpenAIResponseContentPartReasoningSummary(BaseModel):
    """Reasoning summary part in a streamed response.

    :param type: Content part type identifier, always "summary_text"
    :param text: Summary text
    """

    type: Literal["summary_text"] = "summary_text"
    text: str
1169
+
1170
+
1171
@json_schema_type
class OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded(BaseModel):
    """Streaming event for when a new reasoning summary part is added.

    :param item_id: Unique identifier of the output item
    :param output_index: Index position of the output item
    :param part: The summary part that was added
    :param sequence_number: Sequential number for ordering streaming events
    :param summary_index: Index of the summary part within the reasoning summary
    :param type: Event type identifier, always "response.reasoning_summary_part.added"
    """

    item_id: str
    output_index: int
    part: OpenAIResponseContentPartReasoningSummary
    sequence_number: int
    summary_index: int
    type: Literal["response.reasoning_summary_part.added"] = "response.reasoning_summary_part.added"
1189
+
1190
+
1191
@json_schema_type
class OpenAIResponseObjectStreamResponseReasoningSummaryPartDone(BaseModel):
    """Streaming event for when a reasoning summary part is completed.

    :param item_id: Unique identifier of the output item
    :param output_index: Index position of the output item
    :param part: The completed summary part
    :param sequence_number: Sequential number for ordering streaming events
    :param summary_index: Index of the summary part within the reasoning summary
    :param type: Event type identifier, always "response.reasoning_summary_part.done"
    """

    item_id: str
    output_index: int
    part: OpenAIResponseContentPartReasoningSummary
    sequence_number: int
    summary_index: int
    type: Literal["response.reasoning_summary_part.done"] = "response.reasoning_summary_part.done"
1209
+
1210
+
1211
@json_schema_type
class OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta(BaseModel):
    """Streaming event for incremental reasoning summary text updates.

    :param delta: Incremental summary text being added
    :param item_id: Unique identifier of the output item
    :param output_index: Index position of the output item
    :param sequence_number: Sequential number for ordering streaming events
    :param summary_index: Index of the summary part within the reasoning summary
    :param type: Event type identifier, always "response.reasoning_summary_text.delta"
    """

    delta: str
    item_id: str
    output_index: int
    sequence_number: int
    summary_index: int
    type: Literal["response.reasoning_summary_text.delta"] = "response.reasoning_summary_text.delta"
1229
+
1230
+
1231
@json_schema_type
class OpenAIResponseObjectStreamResponseReasoningSummaryTextDone(BaseModel):
    """Streaming event for when reasoning summary text is completed.

    :param text: Final complete summary text
    :param item_id: Unique identifier of the output item
    :param output_index: Index position of the output item
    :param sequence_number: Sequential number for ordering streaming events
    :param summary_index: Index of the summary part within the reasoning summary
    :param type: Event type identifier, always "response.reasoning_summary_text.done"
    """

    text: str
    item_id: str
    output_index: int
    sequence_number: int
    summary_index: int
    type: Literal["response.reasoning_summary_text.done"] = "response.reasoning_summary_text.done"
1249
+
1250
+
1251
@json_schema_type
class OpenAIResponseObjectStreamResponseRefusalDelta(BaseModel):
    """Streaming event for incremental refusal text updates.

    :param content_index: Index position of the content part
    :param delta: Incremental refusal text being added
    :param item_id: Unique identifier of the output item
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.refusal.delta"
    """

    content_index: int
    delta: str
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.refusal.delta"] = "response.refusal.delta"
1269
+
1270
+
1271
@json_schema_type
class OpenAIResponseObjectStreamResponseRefusalDone(BaseModel):
    """Streaming event for when refusal text is completed.

    :param content_index: Index position of the content part
    :param refusal: Final complete refusal text
    :param item_id: Unique identifier of the output item
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.refusal.done"
    """

    content_index: int
    refusal: str
    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.refusal.done"] = "response.refusal.done"
1289
+
1290
+
1291
@json_schema_type
class OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded(BaseModel):
    """Streaming event for when an annotation is added to output text.

    :param item_id: Unique identifier of the item to which the annotation is being added
    :param output_index: Index position of the output item in the response's output array
    :param content_index: Index position of the content part within the output item
    :param annotation_index: Index of the annotation within the content part
    :param annotation: The annotation object being added
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.output_text.annotation.added"
    """

    item_id: str
    output_index: int
    content_index: int
    annotation_index: int
    annotation: OpenAIResponseAnnotations
    sequence_number: int
    type: Literal["response.output_text.annotation.added"] = "response.output_text.annotation.added"
1311
+
1312
+
1313
@json_schema_type
class OpenAIResponseObjectStreamResponseFileSearchCallInProgress(BaseModel):
    """Streaming event for file search calls in progress.

    :param item_id: Unique identifier of the file search call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.file_search_call.in_progress"
    """

    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.file_search_call.in_progress"] = "response.file_search_call.in_progress"
1327
+
1328
+
1329
@json_schema_type
class OpenAIResponseObjectStreamResponseFileSearchCallSearching(BaseModel):
    """Streaming event for file search currently searching.

    :param item_id: Unique identifier of the file search call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.file_search_call.searching"
    """

    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.file_search_call.searching"] = "response.file_search_call.searching"
1343
+
1344
+
1345
@json_schema_type
class OpenAIResponseObjectStreamResponseFileSearchCallCompleted(BaseModel):
    """Streaming event for completed file search calls.

    :param item_id: Unique identifier of the completed file search call
    :param output_index: Index position of the item in the output list
    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.file_search_call.completed"
    """

    item_id: str
    output_index: int
    sequence_number: int
    type: Literal["response.file_search_call.completed"] = "response.file_search_call.completed"
1359
+
1360
+
1361
# Discriminated union of every streaming event emitted while generating a
# response; Pydantic selects the concrete event class via the "type" field.
OpenAIResponseObjectStream = Annotated[
    OpenAIResponseObjectStreamResponseCreated
    | OpenAIResponseObjectStreamResponseInProgress
    | OpenAIResponseObjectStreamResponseOutputItemAdded
    | OpenAIResponseObjectStreamResponseOutputItemDone
    | OpenAIResponseObjectStreamResponseOutputTextDelta
    | OpenAIResponseObjectStreamResponseOutputTextDone
    | OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta
    | OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone
    | OpenAIResponseObjectStreamResponseWebSearchCallInProgress
    | OpenAIResponseObjectStreamResponseWebSearchCallSearching
    | OpenAIResponseObjectStreamResponseWebSearchCallCompleted
    | OpenAIResponseObjectStreamResponseMcpListToolsInProgress
    | OpenAIResponseObjectStreamResponseMcpListToolsFailed
    | OpenAIResponseObjectStreamResponseMcpListToolsCompleted
    | OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta
    | OpenAIResponseObjectStreamResponseMcpCallArgumentsDone
    | OpenAIResponseObjectStreamResponseMcpCallInProgress
    | OpenAIResponseObjectStreamResponseMcpCallFailed
    | OpenAIResponseObjectStreamResponseMcpCallCompleted
    | OpenAIResponseObjectStreamResponseContentPartAdded
    | OpenAIResponseObjectStreamResponseContentPartDone
    | OpenAIResponseObjectStreamResponseReasoningTextDelta
    | OpenAIResponseObjectStreamResponseReasoningTextDone
    | OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded
    | OpenAIResponseObjectStreamResponseReasoningSummaryPartDone
    | OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta
    | OpenAIResponseObjectStreamResponseReasoningSummaryTextDone
    | OpenAIResponseObjectStreamResponseRefusalDelta
    | OpenAIResponseObjectStreamResponseRefusalDone
    | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded
    | OpenAIResponseObjectStreamResponseFileSearchCallInProgress
    | OpenAIResponseObjectStreamResponseFileSearchCallSearching
    | OpenAIResponseObjectStreamResponseFileSearchCallCompleted
    | OpenAIResponseObjectStreamResponseIncomplete
    | OpenAIResponseObjectStreamResponseFailed
    | OpenAIResponseObjectStreamResponseCompleted,
    Field(discriminator="type"),
]
register_schema(OpenAIResponseObjectStream, name="OpenAIResponseObjectStream")
1401
+
1402
+
1403
@json_schema_type
class OpenAIResponseInputFunctionToolCallOutput(BaseModel):
    """
    This represents the output of a function call that gets passed back to the model.

    :param call_id: Identifier of the function call this output corresponds to
    :param output: Output produced by executing the function call
    :param type: Input item type identifier, always "function_call_output"
    :param id: (Optional) Unique identifier of this input item
    :param status: (Optional) Status of the item
    """

    call_id: str
    output: str
    type: Literal["function_call_output"] = "function_call_output"
    id: str | None = None
    status: str | None = None
1414
+
1415
+
1416
OpenAIResponseInput = Annotated[
    # Responses API allows output messages to be passed in as input
    OpenAIResponseOutput
    | OpenAIResponseInputFunctionToolCallOutput
    | OpenAIResponseMCPApprovalResponse
    | OpenAIResponseMessage,
    # No discriminator field is shared by all members, so validate members
    # in declaration order and accept the first that matches.
    Field(union_mode="left_to_right"),
]
register_schema(OpenAIResponseInput, name="OpenAIResponseInput")
1425
+
1426
+
1427
@json_schema_type
class ListOpenAIResponseInputItem(BaseModel):
    """List container for OpenAI response input items.

    :param data: List of input items
    :param object: Object type identifier, always "list"
    """

    data: Sequence[OpenAIResponseInput]
    object: Literal["list"] = "list"
1437
+
1438
+
1439
@json_schema_type
class OpenAIResponseObjectWithInput(OpenAIResponseObject):
    """OpenAI response object extended with input context information.

    :param input: List of input items that led to this response
    """

    input: Sequence[OpenAIResponseInput]

    def to_response_object(self) -> OpenAIResponseObject:
        """Convert to OpenAIResponseObject by excluding the input field.

        :returns: A new OpenAIResponseObject carrying every field of this
            instance except ``input``.
        """
        # Let Pydantic drop the field during dumping instead of filtering
        # the dict by hand; the result is revalidated on construction.
        return OpenAIResponseObject(**self.model_dump(exclude={"input"}))
1451
+
1452
+
1453
@json_schema_type
class ListOpenAIResponseObject(BaseModel):
    """Paginated list of OpenAI response objects with navigation metadata.

    :param data: List of response objects with their input context
    :param has_more: Whether there are more results available beyond this page
    :param first_id: Identifier of the first item in this page
    :param last_id: Identifier of the last item in this page
    :param object: Object type identifier, always "list"
    """

    data: Sequence[OpenAIResponseObjectWithInput]
    has_more: bool
    first_id: str
    last_id: str
    object: Literal["list"] = "list"