orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +332 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +258 -0
- orq_ai_sdk/embeddings.py +238 -0
- orq_ai_sdk/generations.py +272 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5341 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1922 -384
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1375 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +1890 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +384 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +504 -0
- orq_ai_sdk/models/createimageop.py +208 -117
- orq_ai_sdk/models/createimagevariationop.py +486 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2748 -1252
- orq_ai_sdk/models/creatererankop.py +416 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +316 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +562 -0
- orq_ai_sdk/models/createtranslationop.py +540 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1690 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1673 -230
- orq_ai_sdk/models/getpromptversionop.py +1670 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +793 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1684 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1451 -197
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1428 -195
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1951 -404
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2844 -1450
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +660 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +232 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +251 -0
- orq_ai_sdk/transcriptions.py +326 -0
- orq_ai_sdk/translations.py +298 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +254 -0
- orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
- orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
|
@@ -0,0 +1,2567 @@
|
|
|
1
|
+
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
from orq_ai_sdk.types import (
|
|
5
|
+
BaseModel,
|
|
6
|
+
Nullable,
|
|
7
|
+
OptionalNullable,
|
|
8
|
+
UNSET,
|
|
9
|
+
UNSET_SENTINEL,
|
|
10
|
+
)
|
|
11
|
+
from orq_ai_sdk.utils import eventstreaming, get_discriminator
|
|
12
|
+
import pydantic
|
|
13
|
+
from pydantic import ConfigDict, Discriminator, Tag, model_serializer
|
|
14
|
+
from typing import Any, Dict, List, Literal, Optional, Union
|
|
15
|
+
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
Effort = Literal[
    "low",
    "medium",
    "high",
]
r"""The effort level for reasoning (o3-mini model only)"""


class ReasoningTypedDict(TypedDict):
    r"""Configuration for reasoning models"""

    effort: NotRequired[Effort]
    r"""The effort level for reasoning (o3-mini model only)"""


class Reasoning(BaseModel):
    r"""Configuration for reasoning models"""

    effort: Optional[Effort] = None
    r"""The effort level for reasoning (o3-mini model only)"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Custom serialization: optional fields whose value is None are
        # dropped so they are omitted from the payload rather than sent as
        # explicit JSON nulls.
        optional_fields = set(["effort"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # serialize under the wire alias when one exists
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
CreateResponseFormatRouterResponsesType = Literal["json_schema",]
r"""Ensures the response matches a supplied JSON schema"""


class Format3TypedDict(TypedDict):
    type: CreateResponseFormatRouterResponsesType
    r"""Ensures the response matches a supplied JSON schema"""
    name: str
    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
    schema_: Dict[str, Any]
    r"""The JSON schema to validate the response against"""
    description: NotRequired[str]
    r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
    strict: NotRequired[bool]
    r"""Whether to enable strict `schema` adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when `strict` is `true`"""


class Format3(BaseModel):
    type: CreateResponseFormatRouterResponsesType
    r"""Ensures the response matches a supplied JSON schema"""

    name: str
    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""

    # The attribute is named `schema_` to avoid clashing with pydantic's own
    # `schema` name; the wire name is restored via the field alias.
    schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
    r"""The JSON schema to validate the response against"""

    description: Optional[str] = None
    r"""A description of what the response format is for, used by the model to determine how to respond in the format."""

    strict: Optional[bool] = True
    r"""Whether to enable strict `schema` adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when `strict` is `true`"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit optional fields that are None from the serialized payload.
        optional_fields = set(["description", "strict"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


CreateResponseFormatType = Literal["json_object",]
r"""Ensures the response is a valid JSON object"""


class Format2TypedDict(TypedDict):
    type: CreateResponseFormatType
    r"""Ensures the response is a valid JSON object"""


class Format2(BaseModel):
    type: CreateResponseFormatType
    r"""Ensures the response is a valid JSON object"""


FormatType = Literal["text",]
r"""Plain text response format"""


class Format1TypedDict(TypedDict):
    type: FormatType
    r"""Plain text response format"""


class Format1(BaseModel):
    type: FormatType
    r"""Plain text response format"""


CreateResponseFormatTypedDict = TypeAliasType(
    "CreateResponseFormatTypedDict",
    Union[Format1TypedDict, Format2TypedDict, Format3TypedDict],
)


# Discriminated union over the `type` field:
# "text" -> Format1, "json_object" -> Format2, "json_schema" -> Format3.
CreateResponseFormat = Annotated[
    Union[
        Annotated[Format1, Tag("text")],
        Annotated[Format2, Tag("json_object")],
        Annotated[Format3, Tag("json_schema")],
    ],
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]


class CreateResponseTextTypedDict(TypedDict):
    format_: CreateResponseFormatTypedDict


class CreateResponseText(BaseModel):
    # `format` shadows the builtin, hence `format_` plus a wire alias.
    format_: Annotated[CreateResponseFormat, pydantic.Field(alias="format")]
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
CreateResponse2RouterResponsesRequestRequestBodyInputType = Literal["function_call",]
r"""The type of input item"""


class CreateResponse23TypedDict(TypedDict):
    r"""Represents a function tool call, provided as input to the model."""

    type: CreateResponse2RouterResponsesRequestRequestBodyInputType
    r"""The type of input item"""
    call_id: str
    r"""The ID of the function call"""
    id: str
    r"""The unique identifier for this function call"""
    name: str
    r"""The name of the function being called"""
    arguments: str
    r"""The arguments to the function as a JSON string"""
    status: str
    r"""The status of the function call"""


class CreateResponse23(BaseModel):
    r"""Represents a function tool call, provided as input to the model."""

    type: CreateResponse2RouterResponsesRequestRequestBodyInputType
    r"""The type of input item"""

    call_id: str
    r"""The ID of the function call"""

    id: str
    r"""The unique identifier for this function call"""

    name: str
    r"""The name of the function being called"""

    arguments: str
    r"""The arguments to the function as a JSON string"""

    status: str
    r"""The status of the function call"""


CreateResponse2RouterResponsesRequestRequestBodyType = Literal["function_call_output",]
r"""The type of input item"""


class CreateResponse2RouterResponses2TypedDict(TypedDict):
    r"""Represents the output of a function tool call, provided as input to the model."""

    type: CreateResponse2RouterResponsesRequestRequestBodyType
    r"""The type of input item"""
    call_id: str
    r"""The ID of the function call this output is for"""
    output: str
    r"""The output from the function call"""


class CreateResponse2RouterResponses2(BaseModel):
    r"""Represents the output of a function tool call, provided as input to the model."""

    type: CreateResponse2RouterResponsesRequestRequestBodyType
    r"""The type of input item"""

    call_id: str
    r"""The ID of the function call this output is for"""

    output: str
    r"""The output from the function call"""
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
TwoRole = Literal[
    "user",
    "assistant",
    "system",
    "developer",
]
r"""The role of the message author"""


CreateResponse2RouterResponsesRequestType = Literal["input_file",]
r"""The type of input content part"""


class Two3TypedDict(TypedDict):
    r"""A file input content part."""

    type: CreateResponse2RouterResponsesRequestType
    r"""The type of input content part"""
    file_data: NotRequired[str]
    r"""Base64 encoded file data"""
    file_id: NotRequired[str]
    r"""File ID from the Files API"""
    filename: NotRequired[str]
    r"""Name of the file"""
    file_url: NotRequired[str]
    r"""URL of the file to fetch"""


class Two3(BaseModel):
    r"""A file input content part."""

    type: CreateResponse2RouterResponsesRequestType
    r"""The type of input content part"""

    file_data: Optional[str] = None
    r"""Base64 encoded file data"""

    file_id: Optional[str] = None
    r"""File ID from the Files API"""

    filename: Optional[str] = None
    r"""Name of the file"""

    file_url: Optional[str] = None
    r"""URL of the file to fetch"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit optional fields that are None from the serialized payload.
        optional_fields = set(["file_data", "file_id", "filename", "file_url"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


CreateResponse2RouterResponsesType = Literal["input_image",]
r"""The type of input content part"""


TwoDetail = Literal[
    "high",
    "low",
    "auto",
]
r"""Level of detail for image analysis"""


class CreateResponse22TypedDict(TypedDict):
    r"""An image input content part."""

    type: CreateResponse2RouterResponsesType
    r"""The type of input content part"""
    detail: NotRequired[TwoDetail]
    r"""Level of detail for image analysis"""
    file_id: NotRequired[Nullable[str]]
    r"""File ID for the image"""
    image_url: NotRequired[Nullable[str]]
    r"""URL of the image (can be http URL or data URL)"""


class CreateResponse22(BaseModel):
    r"""An image input content part."""

    type: CreateResponse2RouterResponsesType
    r"""The type of input content part"""

    detail: Optional[TwoDetail] = "auto"
    r"""Level of detail for image analysis"""

    file_id: OptionalNullable[str] = UNSET
    r"""File ID for the image"""

    image_url: OptionalNullable[str] = UNSET
    r"""URL of the image (can be http URL or data URL)"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Optional-and-None fields are dropped, EXCEPT nullable fields the
        # caller assigned explicitly: those keep their null so it reaches
        # the server.
        optional_fields = set(["detail", "file_id", "image_url"])
        nullable_fields = set(["file_id", "image_url"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # Explicit assignment is detected via pydantic's fields-set
            # tracking (the UNSET default means "never assigned").
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


CreateResponse2Type = Literal["input_text",]
r"""The type of input content"""


class CreateResponse2RouterResponses1TypedDict(TypedDict):
    r"""A text input content part"""

    type: CreateResponse2Type
    r"""The type of input content"""
    text: str
    r"""The text content"""


class CreateResponse2RouterResponses1(BaseModel):
    r"""A text input content part"""

    type: CreateResponse2Type
    r"""The type of input content"""

    text: str
    r"""The text content"""
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
CreateResponseContent2TypedDict = TypeAliasType(
    "CreateResponseContent2TypedDict",
    Union[
        CreateResponse2RouterResponses1TypedDict,
        CreateResponse22TypedDict,
        Two3TypedDict,
    ],
)


# Discriminated union over `type`:
# "input_text" | "input_image" | "input_file".
CreateResponseContent2 = Annotated[
    Union[
        Annotated[CreateResponse2RouterResponses1, Tag("input_text")],
        Annotated[CreateResponse22, Tag("input_image")],
        Annotated[Two3, Tag("input_file")],
    ],
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]


TwoContentTypedDict = TypeAliasType(
    "TwoContentTypedDict", Union[str, List[CreateResponseContent2TypedDict]]
)
r"""The content of the message, either a string or an array of content parts"""


TwoContent = TypeAliasType("TwoContent", Union[str, List[CreateResponseContent2]])
r"""The content of the message, either a string or an array of content parts"""


class CreateResponse21TypedDict(TypedDict):
    r"""Represents a message in the conversation, with a role and content (string or rich content parts)."""

    role: TwoRole
    r"""The role of the message author"""
    content: TwoContentTypedDict
    r"""The content of the message, either a string or an array of content parts"""


class CreateResponse21(BaseModel):
    r"""Represents a message in the conversation, with a role and content (string or rich content parts)."""

    role: TwoRole
    r"""The role of the message author"""

    content: TwoContent
    r"""The content of the message, either a string or an array of content parts"""


# An input item is a message, a function-call output, or a function call.
Input2TypedDict = TypeAliasType(
    "Input2TypedDict",
    Union[
        CreateResponse21TypedDict,
        CreateResponse2RouterResponses2TypedDict,
        CreateResponse23TypedDict,
    ],
)


Input2 = TypeAliasType(
    "Input2", Union[CreateResponse21, CreateResponse2RouterResponses2, CreateResponse23]
)


CreateResponseInputTypedDict = TypeAliasType(
    "CreateResponseInputTypedDict", Union[str, List[Input2TypedDict]]
)
r"""The actual user input(s) for the model. Can be a simple string, or an array of structured input items (messages, tool outputs) representing a conversation history or complex input."""


CreateResponseInput = TypeAliasType("CreateResponseInput", Union[str, List[Input2]])
r"""The actual user input(s) for the model. Can be a simple string, or an array of structured input items (messages, tool outputs) representing a conversation history or complex input."""
|
|
453
|
+
|
|
454
|
+
|
|
455
|
+
# NOTE(review): presumably the allowed values for an OpenAI-style `include`
# request option (extra payload sections to return) — confirm against the
# upstream API reference.
Include = Literal[
    "code_interpreter_call.outputs",
    "computer_call_output.output.image_url",
    "file_search_call.results",
    "message.input_image.image_url",
    "message.output_text.logprobs",
    "reasoning.encrypted_content",
]
|
|
463
|
+
|
|
464
|
+
|
|
465
|
+
CreateResponseToolsRouterResponsesRequestRequestBodyType = Literal["file_search",]
r"""The type of tool"""


Ranker = Literal[
    "auto",
    "default_2024_08_21",
]
r"""The ranking algorithm"""


class RankingOptionsTypedDict(TypedDict):
    r"""Options for ranking search results"""

    ranker: NotRequired[Ranker]
    r"""The ranking algorithm"""
    score_threshold: NotRequired[float]
    r"""Minimum relevance score"""


class RankingOptions(BaseModel):
    r"""Options for ranking search results"""

    ranker: Optional[Ranker] = "auto"
    r"""The ranking algorithm"""

    score_threshold: Optional[float] = 0
    r"""Minimum relevance score"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit optional fields that are None from the serialized payload.
        optional_fields = set(["ranker", "score_threshold"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


class Tools3TypedDict(TypedDict):
    r"""Configuration for file search tool"""

    type: CreateResponseToolsRouterResponsesRequestRequestBodyType
    r"""The type of tool"""
    vector_store_ids: NotRequired[List[str]]
    r"""The vector stores to search"""
    max_num_results: NotRequired[int]
    r"""Maximum number of results to return"""
    filters: NotRequired[Any]
    r"""Filters to apply to the search"""
    ranking_options: NotRequired[RankingOptionsTypedDict]
    r"""Options for ranking search results"""


class Tools3(BaseModel):
    r"""Configuration for file search tool"""

    type: CreateResponseToolsRouterResponsesRequestRequestBodyType
    r"""The type of tool"""

    vector_store_ids: Optional[List[str]] = None
    r"""The vector stores to search"""

    max_num_results: Optional[int] = 20
    r"""Maximum number of results to return"""

    filters: Optional[Any] = None
    r"""Filters to apply to the search"""

    ranking_options: Optional[RankingOptions] = None
    r"""Options for ranking search results"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit optional fields that are None from the serialized payload.
        optional_fields = set(
            ["vector_store_ids", "max_num_results", "filters", "ranking_options"]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
561
|
+
|
|
562
|
+
|
|
563
|
+
CreateResponseToolsRouterResponsesType = Literal["web_search_preview",]
|
|
564
|
+
r"""The type of tool"""
|
|
565
|
+
|
|
566
|
+
|
|
567
|
+
SearchContextSize = Literal[
|
|
568
|
+
"small",
|
|
569
|
+
"medium",
|
|
570
|
+
"large",
|
|
571
|
+
]
|
|
572
|
+
r"""Amount of context to retrieve for each search result"""
|
|
573
|
+
|
|
574
|
+
|
|
575
|
+
CreateResponseToolsRouterResponsesRequestType = Literal[
|
|
576
|
+
"approximate",
|
|
577
|
+
"exact",
|
|
578
|
+
]
|
|
579
|
+
r"""The type of location"""
|
|
580
|
+
|
|
581
|
+
|
|
582
|
+
class UserLocationTypedDict(TypedDict):
|
|
583
|
+
r"""User location for search localization"""
|
|
584
|
+
|
|
585
|
+
type: NotRequired[CreateResponseToolsRouterResponsesRequestType]
|
|
586
|
+
r"""The type of location"""
|
|
587
|
+
city: NotRequired[Nullable[str]]
|
|
588
|
+
r"""The city name"""
|
|
589
|
+
country: NotRequired[str]
|
|
590
|
+
r"""The country code"""
|
|
591
|
+
region: NotRequired[Nullable[str]]
|
|
592
|
+
r"""The region/state"""
|
|
593
|
+
timezone: NotRequired[Nullable[str]]
|
|
594
|
+
r"""The timezone"""
|
|
595
|
+
|
|
596
|
+
|
|
597
|
+
class UserLocation(BaseModel):
|
|
598
|
+
r"""User location for search localization"""
|
|
599
|
+
|
|
600
|
+
type: Optional[CreateResponseToolsRouterResponsesRequestType] = None
|
|
601
|
+
r"""The type of location"""
|
|
602
|
+
|
|
603
|
+
city: OptionalNullable[str] = UNSET
|
|
604
|
+
r"""The city name"""
|
|
605
|
+
|
|
606
|
+
country: Optional[str] = None
|
|
607
|
+
r"""The country code"""
|
|
608
|
+
|
|
609
|
+
region: OptionalNullable[str] = UNSET
|
|
610
|
+
r"""The region/state"""
|
|
611
|
+
|
|
612
|
+
timezone: OptionalNullable[str] = UNSET
|
|
613
|
+
r"""The timezone"""
|
|
614
|
+
|
|
615
|
+
@model_serializer(mode="wrap")
|
|
616
|
+
def serialize_model(self, handler):
|
|
617
|
+
optional_fields = set(["type", "city", "country", "region", "timezone"])
|
|
618
|
+
nullable_fields = set(["city", "region", "timezone"])
|
|
619
|
+
serialized = handler(self)
|
|
620
|
+
m = {}
|
|
621
|
+
|
|
622
|
+
for n, f in type(self).model_fields.items():
|
|
623
|
+
k = f.alias or n
|
|
624
|
+
val = serialized.get(k)
|
|
625
|
+
is_nullable_and_explicitly_set = (
|
|
626
|
+
k in nullable_fields
|
|
627
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
628
|
+
)
|
|
629
|
+
|
|
630
|
+
if val != UNSET_SENTINEL:
|
|
631
|
+
if (
|
|
632
|
+
val is not None
|
|
633
|
+
or k not in optional_fields
|
|
634
|
+
or is_nullable_and_explicitly_set
|
|
635
|
+
):
|
|
636
|
+
m[k] = val
|
|
637
|
+
|
|
638
|
+
return m
|
|
639
|
+
|
|
640
|
+
|
|
641
|
+
class Tools2TypedDict(TypedDict):
    r"""Configuration for web search tool"""

    type: CreateResponseToolsRouterResponsesType
    r"""The type of tool"""
    domains: NotRequired[List[str]]
    r"""List of domains to restrict search to"""
    search_context_size: NotRequired[SearchContextSize]
    r"""Amount of context to retrieve for each search result"""
    user_location: NotRequired[UserLocationTypedDict]
    r"""User location for search localization"""


class Tools2(BaseModel):
    r"""Configuration for web search tool"""

    type: CreateResponseToolsRouterResponsesType
    r"""The type of tool"""

    domains: Optional[List[str]] = None
    r"""List of domains to restrict search to"""

    search_context_size: Optional[SearchContextSize] = "medium"
    r"""Amount of context to retrieve for each search result"""

    user_location: Optional[UserLocation] = None
    r"""User location for search localization"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, omitting optional fields whose value is None."""
        optionals = {"domains", "search_context_size", "user_location"}
        dumped = handler(self)
        out = {}

        for field_name, info in type(self).model_fields.items():
            key = info.alias or field_name
            value = dumped.get(key)
            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optionals:
                continue
            out[key] = value

        return out
|
|
684
|
+
|
|
685
|
+
|
|
686
|
+
ToolsType = Literal["function",]
r"""The type of tool"""


CreateResponseToolsType = Literal["object",]
r"""The type of the parameters object"""
|
694
|
+
class PropertiesTypedDict(TypedDict):
    type: str
    description: NotRequired[str]
    enum: NotRequired[List[str]]


class Properties(BaseModel):
    # Arbitrary JSON-Schema property definitions must round-trip, so extra
    # keys are accepted and exposed through `additional_properties`.
    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    type: str

    description: Optional[str] = None

    enum: Optional[List[str]] = None

    @property
    def additional_properties(self):
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize declared fields (dropping None optionals), then append
        whatever extra pass-through keys remain in the dump."""
        optionals = {"description", "enum"}
        dumped = handler(self)
        out = {}

        # Emit declared fields first, removing each from the dump as we go.
        for field_name, info in type(self).model_fields.items():
            key = info.alias or field_name
            value = dumped.pop(key, None)
            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optionals:
                continue
            out[key] = value

        # Anything left over came from __pydantic_extra__.
        out.update(dumped)

        return out
|
|
738
|
+
|
|
739
|
+
|
|
740
|
+
class ToolsParametersTypedDict(TypedDict):
    r"""The parameters the function accepts"""

    type: CreateResponseToolsType
    r"""The type of the parameters object"""
    properties: Dict[str, PropertiesTypedDict]
    r"""The parameters the function accepts, described as a JSON Schema object"""
    required: NotRequired[List[str]]
    r"""List of required parameter names"""
    additional_properties: NotRequired[bool]
    r"""Whether to allow properties not defined in the schema"""


class ToolsParameters(BaseModel):
    r"""The parameters the function accepts"""

    type: CreateResponseToolsType
    r"""The type of the parameters object"""

    properties: Dict[str, Properties]
    r"""The parameters the function accepts, described as a JSON Schema object"""

    required: Optional[List[str]] = None
    r"""List of required parameter names"""

    additional_properties: Annotated[
        Optional[bool], pydantic.Field(alias="additionalProperties")
    ] = None
    r"""Whether to allow properties not defined in the schema"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, omitting optional fields whose value is None."""
        # Keys are serialized (alias) names — hence "additionalProperties".
        optionals = {"required", "additionalProperties"}
        dumped = handler(self)
        out = {}

        for field_name, info in type(self).model_fields.items():
            key = info.alias or field_name
            value = dumped.get(key)
            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optionals:
                continue
            out[key] = value

        return out
|
|
785
|
+
|
|
786
|
+
|
|
787
|
+
class Tools1TypedDict(TypedDict):
    r"""A function tool definition"""

    type: ToolsType
    r"""The type of tool"""
    name: str
    r"""The name of the function to be called"""
    parameters: ToolsParametersTypedDict
    r"""The parameters the function accepts"""
    description: NotRequired[Nullable[str]]
    r"""A description of what the function does"""
    strict: NotRequired[bool]
    r"""Whether to enable strict schema adherence when generating function calls"""


class Tools1(BaseModel):
    r"""A function tool definition"""

    type: ToolsType
    r"""The type of tool"""

    name: str
    r"""The name of the function to be called"""

    parameters: ToolsParameters
    r"""The parameters the function accepts"""

    description: OptionalNullable[str] = UNSET
    r"""A description of what the function does"""

    strict: Optional[bool] = True
    r"""Whether to enable strict schema adherence when generating function calls"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping unset optionals; `description` survives when
        explicitly assigned, even as None."""
        optionals = {"description", "strict"}
        nullables = {"description"}
        dumped = handler(self)
        out = {}

        for field_name, info in type(self).model_fields.items():
            key = info.alias or field_name
            value = dumped.get(key)
            explicitly_set = key in nullables and bool(
                self.__pydantic_fields_set__.intersection({field_name})  # pylint: disable=no-member
            )
            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optionals and not explicitly_set:
                continue
            out[key] = value

        return out
|
|
844
|
+
|
|
845
|
+
|
|
846
|
+
CreateResponseToolsTypedDict = TypeAliasType(
    "CreateResponseToolsTypedDict",
    Union[Tools2TypedDict, Tools1TypedDict, Tools3TypedDict],
)


CreateResponseTools = Annotated[
    Union[
        Annotated[Tools1, Tag("function")],
        Annotated[Tools2, Tag("web_search_preview")],
        Annotated[Tools3, Tag("file_search")],
    ],
    # Route union members by their "type" discriminator field.
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]


CreateResponseToolChoiceRouterResponsesRequestType = Literal["mcp",]
|
864
|
+
|
|
865
|
+
class ToolChoice4TypedDict(TypedDict):
    type: CreateResponseToolChoiceRouterResponsesRequestType
    server_label: str
    name: NotRequired[Nullable[str]]


class ToolChoice4(BaseModel):
    type: CreateResponseToolChoiceRouterResponsesRequestType

    server_label: str

    name: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping `name` unless it was explicitly assigned
        (an explicit None is kept)."""
        optionals = {"name"}
        nullables = {"name"}
        dumped = handler(self)
        out = {}

        for field_name, info in type(self).model_fields.items():
            key = info.alias or field_name
            value = dumped.get(key)
            explicitly_set = key in nullables and bool(
                self.__pydantic_fields_set__.intersection({field_name})  # pylint: disable=no-member
            )
            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optionals and not explicitly_set:
                continue
            out[key] = value

        return out
|
|
902
|
+
|
|
903
|
+
|
|
904
|
+
CreateResponseToolChoiceRouterResponsesType = Literal["function",]


class ToolChoice3TypedDict(TypedDict):
    type: CreateResponseToolChoiceRouterResponsesType
    name: str


class ToolChoice3(BaseModel):
    type: CreateResponseToolChoiceRouterResponsesType

    name: str
|
|
916
|
+
|
|
917
|
+
|
|
918
|
+
CreateResponseToolChoiceType = Literal[
    "file_search",
    "web_search_preview",
    "computer_use_preview",
    "code_interpreter",
    "image_generation",
]


class CreateResponseToolChoice2TypedDict(TypedDict):
    type: CreateResponseToolChoiceType


class CreateResponseToolChoice2(BaseModel):
    type: CreateResponseToolChoiceType


CreateResponseToolChoice1 = Literal[
    "none",
    "auto",
    "required",
]
r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool. `auto` means the model can pick between generating a message or calling a tool. `required` means the model must call a tool."""


CreateResponseToolChoiceTypedDict = TypeAliasType(
    "CreateResponseToolChoiceTypedDict",
    Union[
        CreateResponseToolChoice2TypedDict,
        ToolChoice3TypedDict,
        ToolChoice4TypedDict,
        CreateResponseToolChoice1,
    ],
)
r"""How the model should select which tool (or tools) to use when generating a response. Can be a string (`none`, `auto`, `required`) or an object to force a specific tool."""


CreateResponseToolChoice = TypeAliasType(
    "CreateResponseToolChoice",
    Union[
        CreateResponseToolChoice2, ToolChoice3, ToolChoice4, CreateResponseToolChoice1
    ],
)
r"""How the model should select which tool (or tools) to use when generating a response. Can be a string (`none`, `auto`, `required`) or an object to force a specific tool."""
|
|
962
|
+
|
|
963
|
+
|
|
964
|
+
class CreateResponseRequestBodyTypedDict(TypedDict):
    model: str
    r"""ID of the model to use. You can use the List models API to see all of your available models."""
    input: CreateResponseInputTypedDict
    r"""The actual user input(s) for the model. Can be a simple string, or an array of structured input items (messages, tool outputs) representing a conversation history or complex input."""
    metadata: NotRequired[Dict[str, str]]
    r"""Developer-defined key-value pairs that will be included in response objects"""
    temperature: NotRequired[Nullable[float]]
    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
    top_p: NotRequired[Nullable[float]]
    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered."""
    previous_response_id: NotRequired[Nullable[str]]
    r"""The ID of a previous response to continue the conversation from. The model will have access to the previous response context."""
    instructions: NotRequired[Nullable[str]]
    r"""Developer-provided instructions that the model should follow. Overwrites the default system message."""
    reasoning: NotRequired[Nullable[ReasoningTypedDict]]
    r"""Configuration for reasoning models"""
    max_output_tokens: NotRequired[Nullable[int]]
    r"""The maximum number of tokens that can be generated in the response"""
    text: NotRequired[Nullable[CreateResponseTextTypedDict]]
    include: NotRequired[Nullable[List[Include]]]
    r"""Specifies which (potentially large) fields to include in the response. By default, the results of Code Interpreter and file searches are excluded. Available options:
    - code_interpreter_call.outputs: Include the outputs of Code Interpreter tool calls
    - computer_call_output.output.image_url: Include the image URLs from computer use tool calls
    - file_search_call.results: Include the results of file search tool calls
    - message.input_image.image_url: Include URLs of input images
    - message.output_text.logprobs: Include log probabilities for output text (when logprobs is enabled)
    - reasoning.encrypted_content: Include encrypted reasoning content for reasoning models
    """
    parallel_tool_calls: NotRequired[Nullable[bool]]
    r"""Whether to enable parallel function calling during tool use."""
    store: NotRequired[Nullable[bool]]
    r"""Whether to store this response for use in distillations or evals."""
    tools: NotRequired[List[CreateResponseToolsTypedDict]]
    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
    tool_choice: NotRequired[CreateResponseToolChoiceTypedDict]
    r"""How the model should select which tool (or tools) to use when generating a response. Can be a string (`none`, `auto`, `required`) or an object to force a specific tool."""
    stream: NotRequired[bool]
|
|
1002
|
+
|
|
1003
|
+
|
|
1004
|
+
class CreateResponseRequestBody(BaseModel):
    model: str
    r"""ID of the model to use. You can use the List models API to see all of your available models."""

    input: CreateResponseInput
    r"""The actual user input(s) for the model. Can be a simple string, or an array of structured input items (messages, tool outputs) representing a conversation history or complex input."""

    metadata: Optional[Dict[str, str]] = None
    r"""Developer-defined key-value pairs that will be included in response objects"""

    temperature: OptionalNullable[float] = UNSET
    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""

    top_p: OptionalNullable[float] = UNSET
    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered."""

    previous_response_id: OptionalNullable[str] = UNSET
    r"""The ID of a previous response to continue the conversation from. The model will have access to the previous response context."""

    instructions: OptionalNullable[str] = UNSET
    r"""Developer-provided instructions that the model should follow. Overwrites the default system message."""

    reasoning: OptionalNullable[Reasoning] = UNSET
    r"""Configuration for reasoning models"""

    max_output_tokens: OptionalNullable[int] = UNSET
    r"""The maximum number of tokens that can be generated in the response"""

    text: OptionalNullable[CreateResponseText] = UNSET

    include: OptionalNullable[List[Include]] = UNSET
    r"""Specifies which (potentially large) fields to include in the response. By default, the results of Code Interpreter and file searches are excluded. Available options:
    - code_interpreter_call.outputs: Include the outputs of Code Interpreter tool calls
    - computer_call_output.output.image_url: Include the image URLs from computer use tool calls
    - file_search_call.results: Include the results of file search tool calls
    - message.input_image.image_url: Include URLs of input images
    - message.output_text.logprobs: Include log probabilities for output text (when logprobs is enabled)
    - reasoning.encrypted_content: Include encrypted reasoning content for reasoning models
    """

    parallel_tool_calls: OptionalNullable[bool] = UNSET
    r"""Whether to enable parallel function calling during tool use."""

    store: OptionalNullable[bool] = True
    r"""Whether to store this response for use in distillations or evals."""

    tools: Optional[List[CreateResponseTools]] = None
    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""

    tool_choice: Optional[CreateResponseToolChoice] = None
    r"""How the model should select which tool (or tools) to use when generating a response. Can be a string (`none`, `auto`, `required`) or an object to force a specific tool."""

    stream: Optional[bool] = False

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping unset optionals; nullable fields that were
        explicitly assigned (even to None) are still emitted."""
        optionals = {
            "metadata",
            "temperature",
            "top_p",
            "previous_response_id",
            "instructions",
            "reasoning",
            "max_output_tokens",
            "text",
            "include",
            "parallel_tool_calls",
            "store",
            "tools",
            "tool_choice",
            "stream",
        }
        nullables = {
            "temperature",
            "top_p",
            "previous_response_id",
            "instructions",
            "reasoning",
            "max_output_tokens",
            "text",
            "include",
            "parallel_tool_calls",
            "store",
        }
        dumped = handler(self)
        out = {}

        for field_name, info in type(self).model_fields.items():
            key = info.alias or field_name
            value = dumped.get(key)
            explicitly_set = key in nullables and bool(
                self.__pydantic_fields_set__.intersection({field_name})  # pylint: disable=no-member
            )
            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optionals and not explicitly_set:
                continue
            out[key] = value

        return out
|
|
1112
|
+
|
|
1113
|
+
|
|
1114
|
+
class CreateResponseDataTypedDict(TypedDict):
    r"""One server-sent event emitted while the response streams"""

    type: str
    r"""The type of streaming event"""


class CreateResponseData(BaseModel):
    r"""One server-sent event emitted while the response streams"""

    # Streaming events carry arbitrary event-specific payloads, so extra
    # keys are accepted and exposed via `additional_properties`.
    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    type: str
    r"""The type of streaming event"""

    @property
    def additional_properties(self):
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
|
|
1139
|
+
|
|
1140
|
+
|
|
1141
|
+
class CreateResponseRouterResponsesResponseBodyTypedDict(TypedDict):
    r"""One server-sent event emitted while the response streams"""

    data: NotRequired[CreateResponseDataTypedDict]
    r"""One server-sent event emitted while the response streams"""


class CreateResponseRouterResponsesResponseBody(BaseModel):
    r"""One server-sent event emitted while the response streams"""

    data: Optional[CreateResponseData] = None
    r"""One server-sent event emitted while the response streams"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, omitting `data` when it is None."""
        optionals = {"data"}
        dumped = handler(self)
        out = {}

        for field_name, info in type(self).model_fields.items():
            key = info.alias or field_name
            value = dumped.get(key)
            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optionals:
                continue
            out[key] = value

        return out
|
|
1169
|
+
|
|
1170
|
+
|
|
1171
|
+
CreateResponseObject = Literal["response",]
r"""The object type, which is always \"response\" """


CreateResponseStatus = Literal[
    "completed",
    "failed",
    "in_progress",
    "incomplete",
]
r"""The status of the response"""
|
1184
|
+
class CreateResponseErrorTypedDict(TypedDict):
    r"""The error that occurred, if any"""

    code: str
    r"""The error code"""
    message: str
    r"""The error message"""


class CreateResponseError(BaseModel):
    r"""The error that occurred, if any"""

    code: str
    r"""The error code"""

    message: str
    r"""The error message"""
|
|
1201
|
+
|
|
1202
|
+
|
|
1203
|
+
Reason = Literal[
    "max_output_tokens",
    "content_filter",
]
r"""The reason the response is incomplete"""


class IncompleteDetailsTypedDict(TypedDict):
    r"""Details about why the response is incomplete"""

    reason: Reason
    r"""The reason the response is incomplete"""


class IncompleteDetails(BaseModel):
    r"""Details about why the response is incomplete"""

    reason: Reason
    r"""The reason the response is incomplete"""
|
|
1222
|
+
|
|
1223
|
+
|
|
1224
|
+
CreateResponseOutputRouterResponsesResponse200Type = Literal["function_call",]
r"""The type of output item"""


CreateResponseOutputRouterResponsesResponseStatus = Literal[
    "in_progress",
    "completed",
    "incomplete",
    "failed",
]
r"""The status of the function call"""


class Output4TypedDict(TypedDict):
    r"""A function tool call output"""

    id: str
    r"""The unique identifier for this output item"""
    type: CreateResponseOutputRouterResponsesResponse200Type
    r"""The type of output item"""
    call_id: str
    r"""The ID of the function call"""
    name: str
    r"""The name of the function being called"""
    arguments: str
    r"""The arguments to the function as a JSON string"""
    status: CreateResponseOutputRouterResponsesResponseStatus
    r"""The status of the function call"""


class Output4(BaseModel):
    r"""A function tool call output"""

    id: str
    r"""The unique identifier for this output item"""

    type: CreateResponseOutputRouterResponsesResponse200Type
    r"""The type of output item"""

    call_id: str
    r"""The ID of the function call"""

    name: str
    r"""The name of the function being called"""

    arguments: str
    r"""The arguments to the function as a JSON string"""

    status: CreateResponseOutputRouterResponsesResponseStatus
    r"""The status of the function call"""
|
|
1274
|
+
|
|
1275
|
+
|
|
1276
|
+
CreateResponseOutputRouterResponsesResponseType = Literal["file_search_call",]
r"""The type of output item"""


CreateResponseOutputRouterResponsesStatus = Literal[
    "in_progress",
    "completed",
    "incomplete",
    "failed",
]
r"""The status of the file search"""


class Output3TypedDict(TypedDict):
    r"""A file search tool call output"""

    id: str
    r"""The unique identifier for this output item"""
    type: CreateResponseOutputRouterResponsesResponseType
    r"""The type of output item"""
    status: CreateResponseOutputRouterResponsesStatus
    r"""The status of the file search"""
    queries: NotRequired[List[str]]
    r"""The search queries used"""
    results: NotRequired[Any]
    r"""The file search results"""


class Output3(BaseModel):
    r"""A file search tool call output"""

    id: str
    r"""The unique identifier for this output item"""

    type: CreateResponseOutputRouterResponsesResponseType
    r"""The type of output item"""

    status: CreateResponseOutputRouterResponsesStatus
    r"""The status of the file search"""

    queries: Optional[List[str]] = None
    r"""The search queries used"""

    results: Optional[Any] = None
    r"""The file search results"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, omitting optional fields whose value is None."""
        optionals = {"queries", "results"}
        dumped = handler(self)
        out = {}

        for field_name, info in type(self).model_fields.items():
            key = info.alias or field_name
            value = dumped.get(key)
            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optionals:
                continue
            out[key] = value

        return out
|
|
1337
|
+
|
|
1338
|
+
|
|
1339
|
+
CreateResponseOutputRouterResponsesType = Literal["web_search_call",]
r"""The type of output item"""


CreateResponseOutputStatus = Literal[
    "in_progress",
    "completed",
    "incomplete",
    "failed",
]
r"""The status of the web search"""


class Output2TypedDict(TypedDict):
    r"""A web search tool call output"""

    id: str
    r"""The unique identifier for this output item"""
    type: CreateResponseOutputRouterResponsesType
    r"""The type of output item"""
    status: CreateResponseOutputStatus
    r"""The status of the web search"""


class Output2(BaseModel):
    r"""A web search tool call output"""

    id: str
    r"""The unique identifier for this output item"""

    type: CreateResponseOutputRouterResponsesType
    r"""The type of output item"""

    status: CreateResponseOutputStatus
    r"""The status of the web search"""
|
|
1374
|
+
|
|
1375
|
+
|
|
1376
|
+
CreateResponseOutputType = Literal["message",]
r"""The type of output item"""


OutputRole = Literal["assistant",]
r"""The role of the message author"""


OutputStatus = Literal[
    "in_progress",
    "completed",
    "incomplete",
    "failed",
]
r"""The status of the message"""


ContentType = Literal["output_text",]
r"""The type of content part"""


CreateResponseAnnotationsType = Literal["file_citation",]
|
1400
|
+
class Annotations2TypedDict(TypedDict):
    r"""A citation to a file"""

    type: CreateResponseAnnotationsType
    index: float
    r"""The index in the text where the citation appears"""
    file_id: str
    r"""The ID of the file being cited"""
    filename: str
    r"""The name of the file being cited"""


class Annotations2(BaseModel):
    r"""A citation to a file"""

    type: CreateResponseAnnotationsType

    index: float
    r"""The index in the text where the citation appears"""

    file_id: str
    r"""The ID of the file being cited"""

    filename: str
    r"""The name of the file being cited"""
|
|
1425
|
+
|
|
1426
|
+
|
|
1427
|
+
AnnotationsType = Literal["url_citation",]
|
|
1428
|
+
|
|
1429
|
+
|
|
1430
|
+
class Annotations1TypedDict(TypedDict):
|
|
1431
|
+
r"""A citation to a URL"""
|
|
1432
|
+
|
|
1433
|
+
type: AnnotationsType
|
|
1434
|
+
start_index: float
|
|
1435
|
+
r"""The start index of the citation in the text"""
|
|
1436
|
+
end_index: float
|
|
1437
|
+
r"""The end index of the citation in the text"""
|
|
1438
|
+
url: str
|
|
1439
|
+
r"""The URL being cited"""
|
|
1440
|
+
title: str
|
|
1441
|
+
r"""The title of the cited resource"""
|
|
1442
|
+
|
|
1443
|
+
|
|
1444
|
+
class Annotations1(BaseModel):
    r"""A citation to a URL.

    Discriminated by ``type == "url_citation"`` in the ``Annotations`` union.
    """

    type: AnnotationsType

    start_index: float
    r"""The start index of the citation in the text"""

    end_index: float
    r"""The end index of the citation in the text"""

    url: str
    r"""The URL being cited"""

    title: str
    r"""The title of the cited resource"""
|
|
1460
|
+
|
|
1461
|
+
|
|
1462
|
+
AnnotationsTypedDict = TypeAliasType(
    "AnnotationsTypedDict", Union[Annotations2TypedDict, Annotations1TypedDict]
)
r"""An annotation in the output text"""


# Runtime union discriminated by the "type" field so pydantic selects the
# correct annotation model during validation.
Annotations = Annotated[
    Union[
        Annotated[Annotations1, Tag("url_citation")],
        Annotated[Annotations2, Tag("file_citation")],
    ],
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]
r"""An annotation in the output text"""
|
|
1476
|
+
|
|
1477
|
+
|
|
1478
|
+
class Content1TypedDict(TypedDict):
    r"""Text output from the model.

    Request-side TypedDict mirror of ``Content1``.
    """

    type: ContentType
    r"""The type of content part"""
    text: str
    r"""The text content"""
    annotations: NotRequired[List[AnnotationsTypedDict]]
    r"""Annotations in the text such as citations"""
    logprobs: NotRequired[List[Any]]
    r"""Log probabilities of the output tokens if requested"""
|
|
1489
|
+
|
|
1490
|
+
|
|
1491
|
+
class Content1(BaseModel):
    r"""Text output from the model."""

    type: ContentType
    r"""The type of content part"""

    text: str
    r"""The text content"""

    annotations: Optional[List[Annotations]] = None
    r"""Annotations in the text such as citations"""

    logprobs: Optional[List[Any]] = None
    r"""Log probabilities of the output tokens if requested"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Post-process pydantic's serialized dict: drop optional fields that
        # are still None (and any UNSET sentinel values) so they are omitted
        # from the wire payload rather than emitted as null.
        optional_fields = set(["annotations", "logprobs"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialize under the field's alias when one is declared.
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
1521
|
+
|
|
1522
|
+
|
|
1523
|
+
# Message content currently has a single variant (text output), so these
# aliases point straight at Content1.
OutputContentTypedDict = Content1TypedDict


OutputContent = Content1
|
|
1527
|
+
|
|
1528
|
+
|
|
1529
|
+
class Output1TypedDict(TypedDict):
    r"""An assistant message output.

    Request-side TypedDict mirror of ``Output1``.
    """

    id: str
    r"""The unique identifier for this message"""
    type: CreateResponseOutputType
    r"""The type of output item"""
    role: OutputRole
    r"""The role of the message author"""
    status: OutputStatus
    r"""The status of the message"""
    content: NotRequired[List[OutputContentTypedDict]]
    r"""The content parts of the message"""
|
|
1542
|
+
|
|
1543
|
+
|
|
1544
|
+
class Output1(BaseModel):
    r"""An assistant message output.

    Discriminated by ``type == "message"`` in the ``Output`` union.
    """

    id: str
    r"""The unique identifier for this message"""

    type: CreateResponseOutputType
    r"""The type of output item"""

    role: OutputRole
    r"""The role of the message author"""

    status: OutputStatus
    r"""The status of the message"""

    content: Optional[List[OutputContent]] = None
    r"""The content parts of the message"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None (and UNSET sentinels) so they
        # are omitted from the serialized payload.
        optional_fields = set(["content"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
1577
|
+
|
|
1578
|
+
|
|
1579
|
+
OutputTypedDict = TypeAliasType(
    "OutputTypedDict",
    Union[Output2TypedDict, Output1TypedDict, Output3TypedDict, Output4TypedDict],
)


# Output items are discriminated by "type": assistant messages, web-search
# calls, file-search calls, and function calls.
Output = Annotated[
    Union[
        Annotated[Output1, Tag("message")],
        Annotated[Output2, Tag("web_search_call")],
        Annotated[Output3, Tag("file_search_call")],
        Annotated[Output4, Tag("function_call")],
    ],
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]
|
|
1594
|
+
|
|
1595
|
+
|
|
1596
|
+
class CreateResponseInputTokensDetailsTypedDict(TypedDict):
    r"""Breakdown of input token usage.

    Request-side TypedDict mirror of ``CreateResponseInputTokensDetails``.
    """

    cached_tokens: NotRequired[int]
    r"""Number of tokens from cache"""
|
|
1601
|
+
|
|
1602
|
+
|
|
1603
|
+
class CreateResponseInputTokensDetails(BaseModel):
    r"""Breakdown of input token usage."""

    cached_tokens: Optional[int] = None
    r"""Number of tokens from cache"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None (and UNSET sentinels) so they
        # are omitted from the serialized payload.
        optional_fields = set(["cached_tokens"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
1624
|
+
|
|
1625
|
+
|
|
1626
|
+
class OutputTokensDetailsTypedDict(TypedDict):
    r"""Breakdown of output token usage.

    Request-side TypedDict mirror of ``OutputTokensDetails``.
    """

    reasoning_tokens: NotRequired[int]
    r"""Number of tokens used for reasoning (o3 models)"""
    accepted_prediction_tokens: NotRequired[int]
    r"""Number of tokens generated by automatic prediction that were accepted"""
    rejected_prediction_tokens: NotRequired[int]
    r"""Number of tokens generated by automatic prediction that were rejected"""
|
|
1635
|
+
|
|
1636
|
+
|
|
1637
|
+
class OutputTokensDetails(BaseModel):
    r"""Breakdown of output token usage."""

    reasoning_tokens: Optional[int] = None
    r"""Number of tokens used for reasoning (o3 models)"""

    accepted_prediction_tokens: Optional[int] = None
    r"""Number of tokens generated by automatic prediction that were accepted"""

    rejected_prediction_tokens: Optional[int] = None
    r"""Number of tokens generated by automatic prediction that were rejected"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None (and UNSET sentinels) so they
        # are omitted from the serialized payload.
        optional_fields = set(
            [
                "reasoning_tokens",
                "accepted_prediction_tokens",
                "rejected_prediction_tokens",
            ]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
1670
|
+
|
|
1671
|
+
|
|
1672
|
+
class CreateResponseUsageTypedDict(TypedDict):
    r"""Usage statistics for the response.

    Request-side TypedDict mirror of ``CreateResponseUsage``.
    """

    input_tokens: NotRequired[float]
    r"""Number of tokens in the input"""
    output_tokens: NotRequired[float]
    r"""Number of tokens in the generated output"""
    total_tokens: NotRequired[float]
    r"""Total number of tokens used in the request (input + output)"""
    input_tokens_details: NotRequired[CreateResponseInputTokensDetailsTypedDict]
    r"""Breakdown of input token usage"""
    output_tokens_details: NotRequired[OutputTokensDetailsTypedDict]
    r"""Breakdown of output token usage"""
|
|
1685
|
+
|
|
1686
|
+
|
|
1687
|
+
class CreateResponseUsage(BaseModel):
    r"""Usage statistics for the response."""

    input_tokens: Optional[float] = None
    r"""Number of tokens in the input"""

    output_tokens: Optional[float] = None
    r"""Number of tokens in the generated output"""

    total_tokens: Optional[float] = None
    r"""Total number of tokens used in the request (input + output)"""

    input_tokens_details: Optional[CreateResponseInputTokensDetails] = None
    r"""Breakdown of input token usage"""

    output_tokens_details: Optional[OutputTokensDetails] = None
    r"""Breakdown of output token usage"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None (and UNSET sentinels) so they
        # are omitted from the serialized payload.
        optional_fields = set(
            [
                "input_tokens",
                "output_tokens",
                "total_tokens",
                "input_tokens_details",
                "output_tokens_details",
            ]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
1728
|
+
|
|
1729
|
+
|
|
1730
|
+
# Discriminator value for the explicit-function tool-choice variant.
CreateResponseToolChoiceRouterResponsesResponseType = Literal["function",]
|
|
1731
|
+
|
|
1732
|
+
|
|
1733
|
+
class CreateResponseToolChoiceFunctionTypedDict(TypedDict):
    r"""Reference to a function by name for an explicit tool choice."""

    name: str
|
|
1735
|
+
|
|
1736
|
+
|
|
1737
|
+
class CreateResponseToolChoiceFunction(BaseModel):
    r"""Reference to a function by name for an explicit tool choice."""

    name: str
|
|
1739
|
+
|
|
1740
|
+
|
|
1741
|
+
class CreateResponseToolChoiceRouterResponses2TypedDict(TypedDict):
    r"""Tool choice forcing a call to one named function."""

    type: CreateResponseToolChoiceRouterResponsesResponseType
    function: CreateResponseToolChoiceFunctionTypedDict
|
|
1744
|
+
|
|
1745
|
+
|
|
1746
|
+
class CreateResponseToolChoiceRouterResponses2(BaseModel):
    r"""Tool choice forcing a call to one named function."""

    type: CreateResponseToolChoiceRouterResponsesResponseType

    function: CreateResponseToolChoiceFunction
|
|
1750
|
+
|
|
1751
|
+
|
|
1752
|
+
# String shorthands for tool choice: let the model decide ("auto"), forbid
# tools ("none"), or require at least one tool call ("required").
CreateResponseToolChoiceRouterResponses1 = Literal[
    "none",
    "auto",
    "required",
]


CreateResponseRouterResponsesToolChoiceTypedDict = TypeAliasType(
    "CreateResponseRouterResponsesToolChoiceTypedDict",
    Union[
        CreateResponseToolChoiceRouterResponses2TypedDict,
        CreateResponseToolChoiceRouterResponses1,
    ],
)
r"""Controls which (if any) tool is called by the model"""


CreateResponseRouterResponsesToolChoice = TypeAliasType(
    "CreateResponseRouterResponsesToolChoice",
    Union[
        CreateResponseToolChoiceRouterResponses2,
        CreateResponseToolChoiceRouterResponses1,
    ],
)
r"""Controls which (if any) tool is called by the model"""


# Discriminator value for the file-search tool variant (CreateResponseTools3).
CreateResponseToolsRouterResponsesResponse200ApplicationJSONType = Literal[
    "file_search",
]
r"""The type of tool"""


ToolsRanker = Literal[
    "auto",
    "default_2024_08_21",
]
r"""The ranking algorithm"""
|
|
1790
|
+
|
|
1791
|
+
|
|
1792
|
+
class ToolsRankingOptionsTypedDict(TypedDict):
    r"""Options for ranking search results.

    Request-side TypedDict mirror of ``ToolsRankingOptions``.
    """

    ranker: NotRequired[ToolsRanker]
    r"""The ranking algorithm"""
    score_threshold: NotRequired[float]
    r"""Minimum relevance score"""
|
|
1799
|
+
|
|
1800
|
+
|
|
1801
|
+
class ToolsRankingOptions(BaseModel):
    r"""Options for ranking search results."""

    ranker: Optional[ToolsRanker] = "auto"
    r"""The ranking algorithm"""

    score_threshold: Optional[float] = 0
    r"""Minimum relevance score"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None (and UNSET sentinels) so they
        # are omitted from the serialized payload. Note the server-side
        # defaults ("auto", 0) are still emitted because they are not None.
        optional_fields = set(["ranker", "score_threshold"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
1825
|
+
|
|
1826
|
+
|
|
1827
|
+
class CreateResponseTools3TypedDict(TypedDict):
    r"""Configuration for file search tool.

    Request-side TypedDict mirror of ``CreateResponseTools3``.
    """

    type: CreateResponseToolsRouterResponsesResponse200ApplicationJSONType
    r"""The type of tool"""
    vector_store_ids: NotRequired[List[str]]
    r"""The vector stores to search"""
    max_num_results: NotRequired[int]
    r"""Maximum number of results to return"""
    filters: NotRequired[Any]
    r"""Filters to apply to the search"""
    ranking_options: NotRequired[ToolsRankingOptionsTypedDict]
    r"""Options for ranking search results"""
|
|
1840
|
+
|
|
1841
|
+
|
|
1842
|
+
class CreateResponseTools3(BaseModel):
    r"""Configuration for file search tool.

    Discriminated by ``type == "file_search"`` in the tools union.
    """

    type: CreateResponseToolsRouterResponsesResponse200ApplicationJSONType
    r"""The type of tool"""

    vector_store_ids: Optional[List[str]] = None
    r"""The vector stores to search"""

    max_num_results: Optional[int] = 20
    r"""Maximum number of results to return"""

    filters: Optional[Any] = None
    r"""Filters to apply to the search"""

    ranking_options: Optional[ToolsRankingOptions] = None
    r"""Options for ranking search results"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None (and UNSET sentinels) so they
        # are omitted from the serialized payload.
        optional_fields = set(
            ["vector_store_ids", "max_num_results", "filters", "ranking_options"]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
1877
|
+
|
|
1878
|
+
|
|
1879
|
+
# Discriminator value for the web-search tool variant (CreateResponseTools2).
CreateResponseToolsRouterResponsesResponse200Type = Literal["web_search_preview",]
r"""The type of tool"""


ToolsSearchContextSize = Literal[
    "small",
    "medium",
    "large",
]
r"""Amount of context to retrieve for each search result"""


CreateResponseToolsRouterResponsesResponse200ApplicationJSONResponseBodyType = Literal[
    "approximate",
    "exact",
]
r"""The type of location"""
|
|
1896
|
+
|
|
1897
|
+
|
|
1898
|
+
class ToolsUserLocationTypedDict(TypedDict):
    r"""User location for search localization.

    Request-side TypedDict mirror of ``ToolsUserLocation``.
    """

    type: NotRequired[
        CreateResponseToolsRouterResponsesResponse200ApplicationJSONResponseBodyType
    ]
    r"""The type of location"""
    city: NotRequired[Nullable[str]]
    r"""The city name"""
    country: NotRequired[str]
    r"""The country code"""
    region: NotRequired[Nullable[str]]
    r"""The region/state"""
    timezone: NotRequired[Nullable[str]]
    r"""The timezone"""
|
|
1913
|
+
|
|
1914
|
+
|
|
1915
|
+
class ToolsUserLocation(BaseModel):
    r"""User location for search localization."""

    type: Optional[
        CreateResponseToolsRouterResponsesResponse200ApplicationJSONResponseBodyType
    ] = None
    r"""The type of location"""

    city: OptionalNullable[str] = UNSET
    r"""The city name"""

    country: Optional[str] = None
    r"""The country code"""

    region: OptionalNullable[str] = UNSET
    r"""The region/state"""

    timezone: OptionalNullable[str] = UNSET
    r"""The timezone"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Optional fields left unset are omitted from the payload, but
        # nullable fields the caller explicitly assigned (even to None) are
        # kept and serialized as null — pydantic's fields-set tracking tells
        # the two cases apart.
        optional_fields = set(["type", "city", "country", "region", "timezone"])
        nullable_fields = set(["city", "region", "timezone"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
|
|
1959
|
+
|
|
1960
|
+
|
|
1961
|
+
class CreateResponseTools2TypedDict(TypedDict):
    r"""Configuration for web search tool.

    Request-side TypedDict mirror of ``CreateResponseTools2``.
    """

    type: CreateResponseToolsRouterResponsesResponse200Type
    r"""The type of tool"""
    domains: NotRequired[List[str]]
    r"""List of domains to restrict search to"""
    search_context_size: NotRequired[ToolsSearchContextSize]
    r"""Amount of context to retrieve for each search result"""
    user_location: NotRequired[ToolsUserLocationTypedDict]
    r"""User location for search localization"""
|
|
1972
|
+
|
|
1973
|
+
|
|
1974
|
+
class CreateResponseTools2(BaseModel):
    r"""Configuration for web search tool.

    Discriminated by ``type == "web_search_preview"`` in the tools union.
    """

    type: CreateResponseToolsRouterResponsesResponse200Type
    r"""The type of tool"""

    domains: Optional[List[str]] = None
    r"""List of domains to restrict search to"""

    search_context_size: Optional[ToolsSearchContextSize] = "medium"
    r"""Amount of context to retrieve for each search result"""

    user_location: Optional[ToolsUserLocation] = None
    r"""User location for search localization"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None (and UNSET sentinels) so they
        # are omitted from the serialized payload.
        optional_fields = set(["domains", "search_context_size", "user_location"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
2004
|
+
|
|
2005
|
+
|
|
2006
|
+
# Discriminator value for the function tool variant (CreateResponseTools1).
CreateResponseToolsRouterResponsesResponseType = Literal["function",]
r"""The type of tool"""


# JSON-Schema parameter objects always declare type "object".
CreateResponseToolsRouterResponsesResponse200ApplicationJSONResponseBody1Type = Literal[
    "object",
]
r"""The type of the parameters object"""
|
|
2014
|
+
|
|
2015
|
+
|
|
2016
|
+
class ToolsPropertiesTypedDict(TypedDict):
    r"""A single JSON-Schema property of a function tool's parameters."""

    type: str
    description: NotRequired[str]
    enum: NotRequired[List[str]]
|
|
2020
|
+
|
|
2021
|
+
|
|
2022
|
+
class ToolsProperties(BaseModel):
    r"""A single JSON-Schema property of a function tool's parameters.

    ``extra="allow"`` lets arbitrary extra schema keywords (e.g. ``items``,
    ``format``) round-trip via ``__pydantic_extra__``.
    """

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    type: str

    description: Optional[str] = None

    enum: Optional[List[str]] = None

    @property
    def additional_properties(self):
        # Convenience accessor for the extra (non-declared) schema keywords.
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Emit declared fields first (dropping unset optionals), then pass
        # through whatever extra keywords remain in the serialized dict.
        optional_fields = set(["description", "enum"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # Remove declared keys so only the extras are left afterwards.
            serialized.pop(k, None)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val
        for k, v in serialized.items():
            m[k] = v

        return m
|
|
2060
|
+
|
|
2061
|
+
|
|
2062
|
+
class CreateResponseToolsParametersTypedDict(TypedDict):
    r"""The parameters the function accepts.

    Request-side TypedDict mirror of ``CreateResponseToolsParameters``.
    """

    type: CreateResponseToolsRouterResponsesResponse200ApplicationJSONResponseBody1Type
    r"""The type of the parameters object"""
    properties: Dict[str, ToolsPropertiesTypedDict]
    r"""The parameters the function accepts, described as a JSON Schema object"""
    required: NotRequired[List[str]]
    r"""List of required parameter names"""
    additional_properties: NotRequired[bool]
    r"""Whether to allow properties not defined in the schema"""
|
|
2073
|
+
|
|
2074
|
+
|
|
2075
|
+
class CreateResponseToolsParameters(BaseModel):
    r"""The parameters the function accepts."""

    type: CreateResponseToolsRouterResponsesResponse200ApplicationJSONResponseBody1Type
    r"""The type of the parameters object"""

    properties: Dict[str, ToolsProperties]
    r"""The parameters the function accepts, described as a JSON Schema object"""

    required: Optional[List[str]] = None
    r"""List of required parameter names"""

    additional_properties: Annotated[
        Optional[bool], pydantic.Field(alias="additionalProperties")
    ] = None
    r"""Whether to allow properties not defined in the schema"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None so they are omitted from the
        # payload. Keys are the serialized (aliased) names, hence
        # "additionalProperties" rather than the Python attribute name.
        optional_fields = set(["required", "additionalProperties"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
2107
|
+
|
|
2108
|
+
|
|
2109
|
+
class CreateResponseTools1TypedDict(TypedDict):
    r"""A function tool definition.

    Request-side TypedDict mirror of ``CreateResponseTools1``.
    """

    type: CreateResponseToolsRouterResponsesResponseType
    r"""The type of tool"""
    name: str
    r"""The name of the function to be called"""
    parameters: CreateResponseToolsParametersTypedDict
    r"""The parameters the function accepts"""
    description: NotRequired[Nullable[str]]
    r"""A description of what the function does"""
    strict: NotRequired[bool]
    r"""Whether to enable strict schema adherence when generating function calls"""
|
|
2122
|
+
|
|
2123
|
+
|
|
2124
|
+
class CreateResponseTools1(BaseModel):
    r"""A function tool definition.

    Discriminated by ``type == "function"`` in the tools union.
    """

    type: CreateResponseToolsRouterResponsesResponseType
    r"""The type of tool"""

    name: str
    r"""The name of the function to be called"""

    parameters: CreateResponseToolsParameters
    r"""The parameters the function accepts"""

    description: OptionalNullable[str] = UNSET
    r"""A description of what the function does"""

    strict: Optional[bool] = True
    r"""Whether to enable strict schema adherence when generating function calls"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Optional fields left unset are omitted, but a description the
        # caller explicitly set (even to None) is serialized as null —
        # pydantic's fields-set tracking distinguishes the two cases.
        optional_fields = set(["description", "strict"])
        nullable_fields = set(["description"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
|
|
2166
|
+
|
|
2167
|
+
|
|
2168
|
+
CreateResponseRouterResponsesToolsTypedDict = TypeAliasType(
    "CreateResponseRouterResponsesToolsTypedDict",
    Union[
        CreateResponseTools2TypedDict,
        CreateResponseTools1TypedDict,
        CreateResponseTools3TypedDict,
    ],
)


# Tool definitions discriminated by "type": function, web-search-preview,
# and file-search tools.
CreateResponseRouterResponsesTools = Annotated[
    Union[
        Annotated[CreateResponseTools1, Tag("function")],
        Annotated[CreateResponseTools2, Tag("web_search_preview")],
        Annotated[CreateResponseTools3, Tag("file_search")],
    ],
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]
|
|
2186
|
+
|
|
2187
|
+
|
|
2188
|
+
class CreateResponseReasoningTypedDict(TypedDict):
    r"""Reasoning configuration/summary; request-side mirror of ``CreateResponseReasoning``."""

    effort: NotRequired[Nullable[str]]
    summary: NotRequired[Nullable[str]]
|
|
2191
|
+
|
|
2192
|
+
|
|
2193
|
+
class CreateResponseReasoning(BaseModel):
    r"""Reasoning configuration/summary for the response."""

    effort: OptionalNullable[str] = UNSET

    summary: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Both fields are optional AND nullable: unset values are omitted,
        # while values the caller explicitly assigned (even None) are kept
        # and serialized as null.
        optional_fields = set(["effort", "summary"])
        nullable_fields = set(["effort", "summary"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
|
|
2222
|
+
|
|
2223
|
+
|
|
2224
|
+
# Discriminator value for the json_schema response-format variant.
CreateResponseFormatRouterResponsesResponse200ApplicationJSONType = Literal[
    "json_schema",
]
r"""Ensures the response matches a supplied JSON schema"""
|
|
2228
|
+
|
|
2229
|
+
|
|
2230
|
+
class CreateResponseFormat3TypedDict(TypedDict):
    r"""JSON-schema response format; request-side mirror of ``CreateResponseFormat3``."""

    type: CreateResponseFormatRouterResponsesResponse200ApplicationJSONType
    r"""Ensures the response matches a supplied JSON schema"""
    name: str
    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
    schema_: Dict[str, Any]
    r"""The JSON schema to validate the response against"""
    description: NotRequired[str]
    r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
    strict: NotRequired[bool]
    r"""Whether to enable strict `schema` adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when `strict` is `true`"""
|
|
2241
|
+
|
|
2242
|
+
|
|
2243
|
+
class CreateResponseFormat3(BaseModel):
    r"""JSON-schema response format (``type == "json_schema"``)."""

    type: CreateResponseFormatRouterResponsesResponse200ApplicationJSONType
    r"""Ensures the response matches a supplied JSON schema"""

    name: str
    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""

    # Trailing underscore avoids shadowing BaseModel.schema; serialized as "schema".
    schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
    r"""The JSON schema to validate the response against"""

    description: Optional[str] = None
    r"""A description of what the response format is for, used by the model to determine how to respond in the format."""

    strict: Optional[bool] = True
    r"""Whether to enable strict `schema` adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when `strict` is `true`"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop optional fields left as None (and UNSET sentinels) so they
        # are omitted from the serialized payload.
        optional_fields = set(["description", "strict"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
|
|
2274
|
+
|
|
2275
|
+
|
|
2276
|
+
# Discriminator value for the json_object response-format variant.
CreateResponseFormatRouterResponsesResponse200Type = Literal["json_object",]
r"""Ensures the response is a valid JSON object"""
|
|
2278
|
+
|
|
2279
|
+
|
|
2280
|
+
class CreateResponseFormat2TypedDict(TypedDict):
    r"""Input shape for the `json_object` response-format variant."""

    type: CreateResponseFormatRouterResponsesResponse200Type
    r"""Ensures the response is a valid JSON object"""
|
|
2283
|
+
|
|
2284
|
+
|
|
2285
|
+
class CreateResponseFormat2(BaseModel):
    r"""`json_object` response format: the model must emit valid JSON, with no schema enforced."""

    type: CreateResponseFormatRouterResponsesResponse200Type
    r"""Ensures the response is a valid JSON object"""
|
|
2288
|
+
|
|
2289
|
+
|
|
2290
|
+
# Discriminator value for the plain-text response-format variant.
CreateResponseFormatRouterResponsesResponseType = Literal["text"]
r"""Plain text response format"""
|
|
2292
|
+
|
|
2293
|
+
|
|
2294
|
+
class CreateResponseFormat1TypedDict(TypedDict):
    r"""Input shape for the plain-text response-format variant."""

    type: CreateResponseFormatRouterResponsesResponseType
    r"""Plain text response format"""
|
|
2297
|
+
|
|
2298
|
+
|
|
2299
|
+
class CreateResponseFormat1(BaseModel):
    r"""`text` response format: the model replies with unstructured plain text."""

    type: CreateResponseFormatRouterResponsesResponseType
    r"""Plain text response format"""
|
|
2302
|
+
|
|
2303
|
+
|
|
2304
|
+
# Union of the three response-format input shapes
# (text / json_object / json_schema).
CreateResponseRouterResponsesFormatTypedDict = TypeAliasType(
    "CreateResponseRouterResponsesFormatTypedDict",
    Union[
        CreateResponseFormat1TypedDict,
        CreateResponseFormat2TypedDict,
        CreateResponseFormat3TypedDict,
    ],
)
|
|
2312
|
+
|
|
2313
|
+
|
|
2314
|
+
# Discriminated union over the response-format models: pydantic selects the
# variant by reading the `type` field of the incoming payload via the
# get_discriminator helper, matching it against each Tag below.
CreateResponseRouterResponsesFormat = Annotated[
    Union[
        Annotated[CreateResponseFormat1, Tag("text")],
        Annotated[CreateResponseFormat2, Tag("json_object")],
        Annotated[CreateResponseFormat3, Tag("json_schema")],
    ],
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]
|
|
2322
|
+
|
|
2323
|
+
|
|
2324
|
+
class CreateResponseRouterResponsesTextTypedDict(TypedDict):
    r"""Input shape for the text output configuration wrapping the response format."""

    # NOTE(review): the dict key is literally "format_" here (TypedDicts carry
    # no pydantic alias) — confirm against the model class, which serializes
    # the field as "format".
    format_: CreateResponseRouterResponsesFormatTypedDict
|
|
2326
|
+
|
|
2327
|
+
|
|
2328
|
+
class CreateResponseRouterResponsesText(BaseModel):
    r"""Text output configuration: wraps the selected response `format`."""

    # Serialized/deserialized as "format" on the wire; the trailing underscore
    # avoids shadowing the `format` builtin in generated code.
    format_: Annotated[
        CreateResponseRouterResponsesFormat, pydantic.Field(alias="format")
    ]
|
|
2332
|
+
|
|
2333
|
+
|
|
2334
|
+
# Closed set of values accepted for the `truncation` field.
Truncation = Literal["auto", "disabled"]
r"""Controls how the model handles inputs longer than the maximum token length"""
|
|
2339
|
+
|
|
2340
|
+
|
|
2341
|
+
# Closed set of values accepted for the `service_tier` field.
ServiceTier = Literal["auto", "default"]
r"""The service tier used for processing the request"""
|
|
2346
|
+
|
|
2347
|
+
|
|
2348
|
+
class CreateResponseResponseBodyTypedDict(TypedDict):
    r"""Represents the completed model response returned when `stream` is false"""

    # --- required fields ---
    id: str
    r"""The unique identifier for the response"""
    object: CreateResponseObject
    r"""The object type, which is always \"response\" """
    created_at: float
    r"""The Unix timestamp (in seconds) of when the response was created"""
    status: CreateResponseStatus
    r"""The status of the response"""
    error: Nullable[CreateResponseErrorTypedDict]
    r"""The error that occurred, if any"""
    incomplete_details: Nullable[IncompleteDetailsTypedDict]
    r"""Details about why the response is incomplete"""
    model: str
    r"""The model used to generate the response"""
    output: List[OutputTypedDict]
    r"""The list of output items generated by the model"""
    # NOTE(review): undocumented in the source spec; presumably whether
    # parallel tool calls were enabled for this response — confirm upstream.
    parallel_tool_calls: bool
    # --- optional fields (NotRequired) ---
    instructions: NotRequired[Nullable[str]]
    r"""The instructions provided for the response"""
    output_text: NotRequired[Nullable[str]]
    r"""A convenience field with the concatenated text from all text content parts"""
    usage: NotRequired[CreateResponseUsageTypedDict]
    r"""Usage statistics for the response"""
    # NOTE(review): the following undocumented fields appear to echo request
    # parameters of the same names — confirm against the API reference.
    temperature: NotRequired[Nullable[float]]
    top_p: NotRequired[Nullable[float]]
    max_output_tokens: NotRequired[Nullable[int]]
    previous_response_id: NotRequired[Nullable[str]]
    metadata: NotRequired[Dict[str, str]]
    tool_choice: NotRequired[CreateResponseRouterResponsesToolChoiceTypedDict]
    r"""Controls which (if any) tool is called by the model"""
    tools: NotRequired[List[CreateResponseRouterResponsesToolsTypedDict]]
    reasoning: NotRequired[Nullable[CreateResponseReasoningTypedDict]]
    store: NotRequired[bool]
    text: NotRequired[CreateResponseRouterResponsesTextTypedDict]
    truncation: NotRequired[Nullable[Truncation]]
    r"""Controls how the model handles inputs longer than the maximum token length"""
    user: NotRequired[Nullable[str]]
    r"""A unique identifier representing your end-user"""
    service_tier: NotRequired[Nullable[ServiceTier]]
    r"""The service tier used for processing the request"""
    background: NotRequired[Nullable[bool]]
    r"""Whether the response was processed in the background"""
    top_logprobs: NotRequired[Nullable[int]]
    r"""The number of top log probabilities to return for each output token"""
    logprobs: NotRequired[Nullable[bool]]
    r"""Whether to return log probabilities of the output tokens"""
|
|
2397
|
+
|
|
2398
|
+
|
|
2399
|
+
class CreateResponseResponseBody(BaseModel):
    r"""Represents the completed model response returned when `stream` is false"""

    id: str
    r"""The unique identifier for the response"""

    object: CreateResponseObject
    r"""The object type, which is always \"response\" """

    created_at: float
    r"""The Unix timestamp (in seconds) of when the response was created"""

    status: CreateResponseStatus
    r"""The status of the response"""

    error: Nullable[CreateResponseError]
    r"""The error that occurred, if any"""

    incomplete_details: Nullable[IncompleteDetails]
    r"""Details about why the response is incomplete"""

    model: str
    r"""The model used to generate the response"""

    output: List[Output]
    r"""The list of output items generated by the model"""

    # NOTE(review): undocumented in the source spec; presumably whether
    # parallel tool calls were enabled for this response — confirm upstream.
    parallel_tool_calls: bool

    instructions: OptionalNullable[str] = UNSET
    r"""The instructions provided for the response"""

    output_text: OptionalNullable[str] = UNSET
    r"""A convenience field with the concatenated text from all text content parts"""

    usage: Optional[CreateResponseUsage] = None
    r"""Usage statistics for the response"""

    # NOTE(review): the following undocumented fields appear to echo request
    # parameters of the same names — confirm against the API reference.
    temperature: OptionalNullable[float] = UNSET

    top_p: OptionalNullable[float] = UNSET

    max_output_tokens: OptionalNullable[int] = UNSET

    previous_response_id: OptionalNullable[str] = UNSET

    metadata: Optional[Dict[str, str]] = None

    tool_choice: Optional[CreateResponseRouterResponsesToolChoice] = None
    r"""Controls which (if any) tool is called by the model"""

    tools: Optional[List[CreateResponseRouterResponsesTools]] = None

    reasoning: OptionalNullable[CreateResponseReasoning] = UNSET

    store: Optional[bool] = None

    text: Optional[CreateResponseRouterResponsesText] = None

    truncation: OptionalNullable[Truncation] = "disabled"
    r"""Controls how the model handles inputs longer than the maximum token length"""

    user: OptionalNullable[str] = UNSET
    r"""A unique identifier representing your end-user"""

    service_tier: OptionalNullable[ServiceTier] = UNSET
    r"""The service tier used for processing the request"""

    background: OptionalNullable[bool] = UNSET
    r"""Whether the response was processed in the background"""

    top_logprobs: OptionalNullable[int] = UNSET
    r"""The number of top log probabilities to return for each output token"""

    logprobs: OptionalNullable[bool] = UNSET
    r"""Whether to return log probabilities of the output tokens"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize to a dict keyed by wire names, with three rules:
        drop fields still holding the UNSET sentinel; drop optional fields
        whose value is None; but keep None for a nullable field the caller
        explicitly set (so an intentional null reaches the wire)."""
        # Fields that may be omitted entirely from the payload.
        optional_fields = set(
            [
                "instructions",
                "output_text",
                "usage",
                "temperature",
                "top_p",
                "max_output_tokens",
                "previous_response_id",
                "metadata",
                "tool_choice",
                "tools",
                "reasoning",
                "store",
                "text",
                "truncation",
                "user",
                "service_tier",
                "background",
                "top_logprobs",
                "logprobs",
            ]
        )
        # Fields for which an explicit None must be serialized as null.
        nullable_fields = set(
            [
                "error",
                "incomplete_details",
                "instructions",
                "output_text",
                "temperature",
                "top_p",
                "max_output_tokens",
                "previous_response_id",
                "reasoning",
                "truncation",
                "user",
                "service_tier",
                "background",
                "top_logprobs",
                "logprobs",
            ]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Prefer the wire alias (e.g. fields renamed to dodge keywords).
            k = f.alias or n
            val = serialized.get(k)
            # True when the field is nullable AND was assigned by the caller
            # (present in __pydantic_fields_set__), i.e. None is intentional.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
|
|
2540
|
+
|
|
2541
|
+
|
|
2542
|
+
# Either the completed (non-streaming) response body, or a sync/async
# server-sent-event stream of streaming response bodies.
CreateResponseResponseTypedDict = TypeAliasType(
    "CreateResponseResponseTypedDict",
    Union[
        CreateResponseResponseBodyTypedDict,
        Union[
            eventstreaming.EventStream[
                CreateResponseRouterResponsesResponseBodyTypedDict
            ],
            eventstreaming.EventStreamAsync[
                CreateResponseRouterResponsesResponseBodyTypedDict
            ],
        ],
    ],
)
|
|
2556
|
+
|
|
2557
|
+
|
|
2558
|
+
# Model-level counterpart of CreateResponseResponseTypedDict: the completed
# response body, or a sync/async event stream of streaming response bodies.
CreateResponseResponse = TypeAliasType(
    "CreateResponseResponse",
    Union[
        CreateResponseResponseBody,
        Union[
            eventstreaming.EventStream[CreateResponseRouterResponsesResponseBody],
            eventstreaming.EventStreamAsync[CreateResponseRouterResponsesResponseBody],
        ],
    ],
)
|