orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +332 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +258 -0
- orq_ai_sdk/embeddings.py +238 -0
- orq_ai_sdk/generations.py +272 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5341 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1922 -384
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1375 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +1890 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +384 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +504 -0
- orq_ai_sdk/models/createimageop.py +208 -117
- orq_ai_sdk/models/createimagevariationop.py +486 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2748 -1252
- orq_ai_sdk/models/creatererankop.py +416 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +316 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +562 -0
- orq_ai_sdk/models/createtranslationop.py +540 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1690 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1673 -230
- orq_ai_sdk/models/getpromptversionop.py +1670 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +793 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1684 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1451 -197
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1428 -195
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1951 -404
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2844 -1450
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +660 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +232 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +251 -0
- orq_ai_sdk/transcriptions.py +326 -0
- orq_ai_sdk/translations.py +298 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +254 -0
- orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
- orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/models/createchatcompletionop.py

@@ -11,6 +11,7 @@ from .imagecontentpartschema import (
     ImageContentPartSchemaTypedDict,
 )
 from .publiccontact import PublicContact, PublicContactTypedDict
+from .publicidentity import PublicIdentity, PublicIdentityTypedDict
 from .reasoningpartschema import ReasoningPartSchema, ReasoningPartSchemaTypedDict
 from .redactedreasoningpartschema import (
     RedactedReasoningPartSchema,
@@ -46,31 +47,37 @@ from typing_extensions import (
 )


-
+CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Role = Literal[
+    "tool",
+]
 r"""The role of the messages author, in this case tool."""


-
+CreateChatCompletionContentRouterChatCompletionsRequest2TypedDict = (
+    TextContentPartSchemaTypedDict
+)


-
+CreateChatCompletionContentRouterChatCompletionsRequest2 = TextContentPartSchema


-
-    "
-    Union[str, List[
+CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5ContentTypedDict = TypeAliasType(
+    "CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5ContentTypedDict",
+    Union[str, List[CreateChatCompletionContentRouterChatCompletionsRequest2TypedDict]],
 )
 r"""The contents of the tool message."""


-
-
-
+CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Content = (
+    TypeAliasType(
+        "CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Content",
+        Union[str, List[CreateChatCompletionContentRouterChatCompletionsRequest2]],
+    )
 )
 r"""The contents of the tool message."""


-
+CreateChatCompletionMessagesRouterChatCompletionsType = Literal["ephemeral",]
 r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""


@@ -88,7 +95,7 @@ Defaults to `5m`. Only supported by `Anthropic` Claude models.


 class CreateChatCompletionMessagesCacheControlTypedDict(TypedDict):
-    type:
+    type: CreateChatCompletionMessagesRouterChatCompletionsType
     r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
     ttl: NotRequired[CreateChatCompletionMessagesTTL]
     r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
@@ -101,7 +108,7 @@ class CreateChatCompletionMessagesCacheControlTypedDict(TypedDict):


 class CreateChatCompletionMessagesCacheControl(BaseModel):
-    type:
+    type: CreateChatCompletionMessagesRouterChatCompletionsType
     r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""

     ttl: Optional[CreateChatCompletionMessagesTTL] = "5m"
@@ -113,11 +120,27 @@ class CreateChatCompletionMessagesCacheControl(BaseModel):
     Defaults to `5m`. Only supported by `Anthropic` Claude models.
     """

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionMessagesToolMessageTypedDict(TypedDict):
-    role:
+    role: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Role
     r"""The role of the messages author, in this case tool."""
-    content:
+    content: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5ContentTypedDict
     r"""The contents of the tool message."""
     tool_call_id: Nullable[str]
     r"""Tool call that this message is responding to."""
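Nearly every block added in this file is a variant of the wrap-mode serializer introduced above: it re-keys each field to its wire alias, filters out the SDK's UNSET_SENTINEL, and drops optional fields that serialized to None. A minimal, self-contained sketch of the pattern — the model name is hypothetical, and a local object() stands in for UNSET_SENTINEL, which is defined in the SDK's generated internals:

from typing import Optional

import pydantic
from pydantic import model_serializer

UNSET_SENTINEL = object()  # stand-in; the real sentinel is defined by the SDK


class CacheControlSketch(pydantic.BaseModel):  # hypothetical model
    type: str
    ttl: Optional[str] = "5m"

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["ttl"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # prefer the wire alias over the field name
            val = serialized.get(k)

            # Required fields always pass through; optional fields only
            # when they carry a real value.
            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


print(CacheControlSketch(type="ephemeral", ttl=None).model_dump())  # {'type': 'ephemeral'}
print(CacheControlSketch(type="ephemeral").model_dump())  # {'type': 'ephemeral', 'ttl': '5m'}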
@@ -125,10 +148,10 @@ class CreateChatCompletionMessagesToolMessageTypedDict(TypedDict):


 class CreateChatCompletionMessagesToolMessage(BaseModel):
-    role:
+    role: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Role
     r"""The role of the messages author, in this case tool."""

-    content:
+    content: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Content
     r"""The contents of the tool message."""

     tool_call_id: Nullable[str]
@@ -138,37 +161,32 @@ class CreateChatCompletionMessagesToolMessage(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["cache_control"]
-        nullable_fields = ["tool_call_id"]
-        null_default_fields = []
-
+        optional_fields = set(["cache_control"])
+        nullable_fields = set(["tool_call_id"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-
-    "
+CreateChatCompletionContentRouterChatCompletions2TypedDict = TypeAliasType(
+    "CreateChatCompletionContentRouterChatCompletions2TypedDict",
     Union[
         RefusalPartSchemaTypedDict,
         RedactedReasoningPartSchemaTypedDict,
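The longer variant in the hunk above adds a nullable check: a field that is both optional and nullable is emitted as an explicit null when the caller actually assigned None, but dropped when it was merely left at its default. The distinction comes from pydantic's __pydantic_fields_set__. A sketch under the same caveats as before (hypothetical model, plain Optional standing in for the SDK's Nullable):

from typing import Optional

import pydantic
from pydantic import model_serializer

UNSET_SENTINEL = object()  # stand-in; the real sentinel is defined by the SDK


class AssistantMessageSketch(pydantic.BaseModel):  # hypothetical model
    role: str
    content: Optional[str] = None  # optional *and* nullable in the real model

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["content"])
        nullable_fields = set(["content"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # True only when the caller explicitly assigned the field.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and bool(self.__pydantic_fields_set__.intersection({n}))
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


print(AssistantMessageSketch(role="assistant").model_dump())
# {'role': 'assistant'} -- default None is dropped
print(AssistantMessageSketch(role="assistant", content=None).model_dump())
# {'role': 'assistant', 'content': None} -- an explicit null survives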
@@ -178,7 +196,7 @@ CreateChatCompletionContentRouter2TypedDict = TypeAliasType(
 )


-
+CreateChatCompletionContentRouterChatCompletions2 = Annotated[
     Union[
         Annotated[TextContentPartSchema, Tag("text")],
         Annotated[RefusalPartSchema, Tag("refusal")],
@@ -189,21 +207,25 @@ CreateChatCompletionContentRouter2 = Annotated[
 ]


-
-    "
-    Union[str, List[
+CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContentTypedDict = TypeAliasType(
+    "CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContentTypedDict",
+    Union[str, List[CreateChatCompletionContentRouterChatCompletions2TypedDict]],
 )
 r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""


-
-
-
+CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContent = (
+    TypeAliasType(
+        "CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContent",
+        Union[str, List[CreateChatCompletionContentRouterChatCompletions2]],
+    )
 )
 r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""


-
+CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyRole = Literal[
+    "assistant",
+]
 r"""The role of the messages author, in this case `assistant`."""

@@ -239,6 +261,22 @@ class CreateChatCompletionMessagesFunction(BaseModel):
     arguments: Optional[str] = None
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionMessagesToolCallsTypedDict(TypedDict):
     id: str
@@ -262,12 +300,30 @@ class CreateChatCompletionMessagesToolCalls(BaseModel):
     thought_signature: Optional[str] = None
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["thought_signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionMessagesAssistantMessageTypedDict(TypedDict):
-    role:
+    role: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyRole
     r"""The role of the messages author, in this case `assistant`."""
     content: NotRequired[
-        Nullable[
+        Nullable[
+            CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContentTypedDict
+        ]
     ]
     r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
     refusal: NotRequired[Nullable[str]]
@@ -281,11 +337,11 @@ class CreateChatCompletionMessagesAssistantMessageTypedDict(TypedDict):


 class CreateChatCompletionMessagesAssistantMessage(BaseModel):
-    role:
+    role: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyRole
     r"""The role of the messages author, in this case `assistant`."""

     content: OptionalNullable[
-
+        CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContent
     ] = UNSET
     r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""

@@ -303,36 +359,31 @@ class CreateChatCompletionMessagesAssistantMessage(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["content", "refusal", "name", "audio", "tool_calls"]
-        nullable_fields = ["content", "refusal", "audio"]
-        null_default_fields = []
-
+        optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
+        nullable_fields = set(["content", "refusal", "audio"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-
+CreateChatCompletionMessagesRouterChatCompletionsRequestRole = Literal["user",]
 r"""The role of the messages author, in this case `user`."""

@@ -340,7 +391,7 @@ CreateChatCompletion2Type = Literal["file",]
 r"""The type of the content part. Always `file`."""


-
+CreateChatCompletion2RouterChatCompletionsType = Literal["ephemeral",]
 r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""


@@ -358,7 +409,7 @@ Defaults to `5m`. Only supported by `Anthropic` Claude models.


 class CreateChatCompletion2CacheControlTypedDict(TypedDict):
-    type:
+    type: CreateChatCompletion2RouterChatCompletionsType
     r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
     ttl: NotRequired[CreateChatCompletion2TTL]
     r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
@@ -371,7 +422,7 @@ class CreateChatCompletion2CacheControlTypedDict(TypedDict):


 class CreateChatCompletion2CacheControl(BaseModel):
-    type:
+    type: CreateChatCompletion2RouterChatCompletionsType
     r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""

     ttl: Optional[CreateChatCompletion2TTL] = "5m"
@@ -383,6 +434,22 @@ class CreateChatCompletion2CacheControl(BaseModel):
     Defaults to `5m`. Only supported by `Anthropic` Claude models.
     """

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletion24TypedDict(TypedDict):
     type: CreateChatCompletion2Type
@@ -401,6 +468,22 @@ class CreateChatCompletion24(BaseModel):

     cache_control: Optional[CreateChatCompletion2CacheControl] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["cache_control"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionContent2TypedDict = TypeAliasType(
     "CreateChatCompletionContent2TypedDict",
@@ -424,76 +507,111 @@ CreateChatCompletionContent2 = Annotated[
 ]


-
-
-
+CreateChatCompletionMessagesRouterChatCompletionsRequestContentTypedDict = (
+    TypeAliasType(
+        "CreateChatCompletionMessagesRouterChatCompletionsRequestContentTypedDict",
+        Union[str, List[CreateChatCompletionContent2TypedDict]],
+    )
 )
 r"""The contents of the user message."""


-
-    "
+CreateChatCompletionMessagesRouterChatCompletionsRequestContent = TypeAliasType(
+    "CreateChatCompletionMessagesRouterChatCompletionsRequestContent",
     Union[str, List[CreateChatCompletionContent2]],
 )
 r"""The contents of the user message."""


 class CreateChatCompletionMessagesUserMessageTypedDict(TypedDict):
-    role:
+    role: CreateChatCompletionMessagesRouterChatCompletionsRequestRole
     r"""The role of the messages author, in this case `user`."""
-    content:
+    content: CreateChatCompletionMessagesRouterChatCompletionsRequestContentTypedDict
     r"""The contents of the user message."""
     name: NotRequired[str]
     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""


 class CreateChatCompletionMessagesUserMessage(BaseModel):
-    role:
+    role: CreateChatCompletionMessagesRouterChatCompletionsRequestRole
     r"""The role of the messages author, in this case `user`."""

-    content:
+    content: CreateChatCompletionMessagesRouterChatCompletionsRequestContent
     r"""The contents of the user message."""

     name: Optional[str] = None
     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}

-
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+CreateChatCompletionMessagesRouterChatCompletionsRole = Literal["developer",]
 r"""The role of the messages author, in this case `developer`."""


-
-    "
+CreateChatCompletionMessagesRouterChatCompletionsContentTypedDict = TypeAliasType(
+    "CreateChatCompletionMessagesRouterChatCompletionsContentTypedDict",
     Union[str, List[TextContentPartSchemaTypedDict]],
 )
 r"""The contents of the developer message."""


-
-    "
+CreateChatCompletionMessagesRouterChatCompletionsContent = TypeAliasType(
+    "CreateChatCompletionMessagesRouterChatCompletionsContent",
+    Union[str, List[TextContentPartSchema]],
 )
 r"""The contents of the developer message."""


 class CreateChatCompletionMessagesDeveloperMessageTypedDict(TypedDict):
-    role:
+    role: CreateChatCompletionMessagesRouterChatCompletionsRole
     r"""The role of the messages author, in this case `developer`."""
-    content:
+    content: CreateChatCompletionMessagesRouterChatCompletionsContentTypedDict
     r"""The contents of the developer message."""
     name: NotRequired[str]
     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""


 class CreateChatCompletionMessagesDeveloperMessage(BaseModel):
-    role:
+    role: CreateChatCompletionMessagesRouterChatCompletionsRole
     r"""The role of the messages author, in this case `developer`."""

-    content:
+    content: CreateChatCompletionMessagesRouterChatCompletionsContent
     r"""The contents of the developer message."""

     name: Optional[str] = None
     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionMessagesRole = Literal["system",]
 r"""The role of the messages author, in this case `system`."""
@@ -535,6 +653,22 @@ class CreateChatCompletionMessagesSystemMessage(BaseModel):
     name: Optional[str] = None
     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionMessagesTypedDict = TypeAliasType(
     "CreateChatCompletionMessagesTypedDict",
@@ -600,10 +734,14 @@ class CreateChatCompletionAudio(BaseModel):
     r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""


-
+CreateChatCompletionResponseFormatRouterChatCompletionsRequestType = Literal[
+    "json_schema",
+]


-class
+class CreateChatCompletionResponseFormatRouterChatCompletionsJSONSchemaTypedDict(
+    TypedDict
+):
     name: str
     r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
     description: NotRequired[str]
@@ -614,7 +752,7 @@ class CreateChatCompletionResponseFormatRouterJSONSchemaTypedDict(TypedDict):
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""


-class
+class CreateChatCompletionResponseFormatRouterChatCompletionsJSONSchema(BaseModel):
     name: str
     r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""

@@ -627,6 +765,22 @@ class CreateChatCompletionResponseFormatRouterJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionResponseFormatJSONSchemaTypedDict(TypedDict):
     r"""
@@ -634,8 +788,10 @@ class CreateChatCompletionResponseFormatJSONSchemaTypedDict(TypedDict):
     JSON Schema response format. Used to generate structured JSON responses
     """

-    type:
-    json_schema:
+    type: CreateChatCompletionResponseFormatRouterChatCompletionsRequestType
+    json_schema: (
+        CreateChatCompletionResponseFormatRouterChatCompletionsJSONSchemaTypedDict
+    )


 class CreateChatCompletionResponseFormatJSONSchema(BaseModel):
@@ -644,12 +800,12 @@ class CreateChatCompletionResponseFormatJSONSchema(BaseModel):
     JSON Schema response format. Used to generate structured JSON responses
     """

-    type:
+    type: CreateChatCompletionResponseFormatRouterChatCompletionsRequestType

-    json_schema:
+    json_schema: CreateChatCompletionResponseFormatRouterChatCompletionsJSONSchema


-
+CreateChatCompletionResponseFormatRouterChatCompletionsType = Literal["json_object",]


 class CreateChatCompletionResponseFormatJSONObjectTypedDict(TypedDict):
@@ -658,7 +814,7 @@ class CreateChatCompletionResponseFormatJSONObjectTypedDict(TypedDict):
     JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
     """

-    type:
+    type: CreateChatCompletionResponseFormatRouterChatCompletionsType


 class CreateChatCompletionResponseFormatJSONObject(BaseModel):
@@ -667,7 +823,7 @@ class CreateChatCompletionResponseFormatJSONObject(BaseModel):
     JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
     """

-    type:
+    type: CreateChatCompletionResponseFormatRouterChatCompletionsType


 CreateChatCompletionResponseFormatType = Literal["text",]
@@ -757,6 +913,22 @@ class CreateChatCompletionStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionThinkingTypedDict = TypeAliasType(
     "CreateChatCompletionThinkingTypedDict",
@@ -777,13 +949,13 @@ CreateChatCompletionType = Literal["function",]
 r"""The type of the tool. Currently, only function is supported."""


-
+CreateChatCompletionRouterChatCompletionsType = Literal["object",]


 class CreateChatCompletionParametersTypedDict(TypedDict):
     r"""The parameters the functions accepts, described as a JSON Schema object"""

-    type:
+    type: CreateChatCompletionRouterChatCompletionsType
     properties: Dict[str, Any]
     required: NotRequired[List[str]]
     additional_properties: NotRequired[bool]
@@ -792,7 +964,7 @@ class CreateChatCompletionParametersTypedDict(TypedDict):
 class CreateChatCompletionParameters(BaseModel):
     r"""The parameters the functions accepts, described as a JSON Schema object"""

-    type:
+    type: CreateChatCompletionRouterChatCompletionsType

     properties: Dict[str, Any]

@@ -802,6 +974,22 @@ class CreateChatCompletionParameters(BaseModel):
         Optional[bool], pydantic.Field(alias="additionalProperties")
     ] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["required", "additionalProperties"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionFunctionTypedDict(TypedDict):
     name: str
@@ -827,6 +1015,22 @@ class CreateChatCompletionFunction(BaseModel):
     strict: Optional[bool] = None
     r"""Whether to enable strict schema adherence when generating the function call."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "parameters", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionToolsTypedDict(TypedDict):
     function: CreateChatCompletionFunctionTypedDict
@@ -840,6 +1044,22 @@ class CreateChatCompletionTools(BaseModel):
     type: Optional[CreateChatCompletionType] = None
     r"""The type of the tool. Currently, only function is supported."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionToolChoiceType = Literal["function",]
 r"""The type of the tool. Currently, only function is supported."""
@@ -867,6 +1087,22 @@ class CreateChatCompletionToolChoice2(BaseModel):
     type: Optional[CreateChatCompletionToolChoiceType] = None
     r"""The type of the tool. Currently, only function is supported."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionToolChoice1 = Literal[
     "none",
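Beyond the renames, the tool-definition models are unchanged in shape, so constructing them still follows the OpenAI-style function-tool layout. A hedged construction sketch — the import path is an assumption based on the file list at the top, and the argument values are placeholders:

# Assumed import path; the models are defined in this file per the listing.
from orq_ai_sdk.models.createchatcompletionop import (
    CreateChatCompletionFunction,
    CreateChatCompletionParameters,
    CreateChatCompletionTools,
)

weather_tool = CreateChatCompletionTools(
    type="function",  # optional; "function" is the only supported value
    function=CreateChatCompletionFunction(
        name="get_weather",
        description="Look up the current weather for a city.",
        parameters=CreateChatCompletionParameters(
            type="object",
            properties={"city": {"type": "string"}},
            required=["city"],
        ),
        strict=True,
    ),
)

# The new serializers drop unset optionals (e.g. additionalProperties) on dump.
print(weather_tool.model_dump())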
@@ -933,6 +1169,16 @@ class CreateChatCompletionGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""


+class CreateChatCompletionFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class CreateChatCompletionFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
 class CreateChatCompletionRetryTypedDict(TypedDict):
     r"""Retry configuration for the request"""

@@ -951,13 +1197,167 @@ class CreateChatCompletionRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+CreateChatCompletionRouterChatCompletionsRequestType = Literal["exact_match",]
+
+
+class CreateChatCompletionCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: CreateChatCompletionRouterChatCompletionsRequestType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class CreateChatCompletionCache(BaseModel):
+    r"""Cache configuration for the request."""

-
+    type: CreateChatCompletionRouterChatCompletionsRequestType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+CreateChatCompletionLoadBalancerType = Literal["weight_based",]
+
+
+class CreateChatCompletionLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class CreateChatCompletionLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreateChatCompletionLoadBalancer1TypedDict(TypedDict):
+    type: CreateChatCompletionLoadBalancerType
+    models: List[CreateChatCompletionLoadBalancerModelsTypedDict]
+
+
+class CreateChatCompletionLoadBalancer1(BaseModel):
+    type: CreateChatCompletionLoadBalancerType
+
+    models: List[CreateChatCompletionLoadBalancerModels]
+
+
+CreateChatCompletionLoadBalancerTypedDict = CreateChatCompletionLoadBalancer1TypedDict
+r"""Load balancer configuration for the request."""
+
+
+CreateChatCompletionLoadBalancer = CreateChatCompletionLoadBalancer1
+r"""Load balancer configuration for the request."""
+
+
+class CreateChatCompletionTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class CreateChatCompletionTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class CreateChatCompletionRouterChatCompletionsRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class CreateChatCompletionRouterChatCompletionsRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreateChatCompletionRouterChatCompletionsFallbacksTypedDict(TypedDict):
     model: str
     r"""Fallback model identifier"""


-class
+class CreateChatCompletionRouterChatCompletionsFallbacks(BaseModel):
     model: str
     r"""Fallback model identifier"""

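Taken together, the hunk above gives the chat-completions request its own router controls: exact-match caching, weight-based load balancing, a per-call timeout, retries, and fallbacks. A hedged sketch of constructing them — the import path and model identifiers are assumptions, and whether the request takes these objects or plain dicts is not shown in this diff:

from orq_ai_sdk.models.createchatcompletionop import (
    CreateChatCompletionCache,
    CreateChatCompletionFallbacks,
    CreateChatCompletionLoadBalancer1,
    CreateChatCompletionLoadBalancerModels,
    CreateChatCompletionRouterChatCompletionsRetry,
    CreateChatCompletionTimeout,
)

cache = CreateChatCompletionCache(type="exact_match", ttl=3600)  # seconds, max 259200
load_balancer = CreateChatCompletionLoadBalancer1(
    type="weight_based",
    models=[  # model ids below are placeholders
        CreateChatCompletionLoadBalancerModels(model="gpt-4o", weight=0.7),
        CreateChatCompletionLoadBalancerModels(model="gpt-4o-mini", weight=0.3),
    ],
)
timeout = CreateChatCompletionTimeout(call_timeout=30_000)  # milliseconds
retry = CreateChatCompletionRouterChatCompletionsRetry(count=3, on_codes=[429, 500, 503])
fallbacks = [CreateChatCompletionFallbacks(model="gpt-4o-mini")]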
@@ -985,51 +1385,6 @@ class Prompt(BaseModel):
     r"""Version of the prompt to use (currently only \"latest\" supported)"""


-@deprecated(
-    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
-)
-class CreateChatCompletionContactTypedDict(TypedDict):
-    r"""@deprecated Use identity instead. Information about the contact making the request."""
-
-    id: str
-    r"""Unique identifier for the contact"""
-    display_name: NotRequired[str]
-    r"""Display name of the contact"""
-    email: NotRequired[str]
-    r"""Email address of the contact"""
-    metadata: NotRequired[List[Dict[str, Any]]]
-    r"""A hash of key/value pairs containing any other data about the contact"""
-    logo_url: NotRequired[str]
-    r"""URL to the contact's avatar or logo"""
-    tags: NotRequired[List[str]]
-    r"""A list of tags associated with the contact"""
-
-
-@deprecated(
-    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
-)
-class CreateChatCompletionContact(BaseModel):
-    r"""@deprecated Use identity instead. Information about the contact making the request."""
-
-    id: str
-    r"""Unique identifier for the contact"""
-
-    display_name: Optional[str] = None
-    r"""Display name of the contact"""
-
-    email: Optional[str] = None
-    r"""Email address of the contact"""
-
-    metadata: Optional[List[Dict[str, Any]]] = None
-    r"""A hash of key/value pairs containing any other data about the contact"""
-
-    logo_url: Optional[str] = None
-    r"""URL to the contact's avatar or logo"""
-
-    tags: Optional[List[str]] = None
-    r"""A list of tags associated with the contact"""
-
-
 class CreateChatCompletionThreadTypedDict(TypedDict):
     r"""Thread information to group related requests"""

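The deprecated contact models are now deleted outright rather than merely flagged; their docstrings pointed to identity as the replacement, and the first hunk in this file imports PublicIdentity. A migration sketch, kept entirely in comments because the identity request shape is not shown in this diff — the method and field names below are unverified assumptions carried over from the removed contact model:

# 4.2.0rc28 (removed): attach a contact to the request.
# completion = client.router.chat.completions.create(
#     model="...", messages=[...],
#     contact={"id": "user_123", "display_name": "Ada"},
# )
#
# 4.2.6 (assumed): attach an identity instead; see publicidentity.py and
# identities.py in the file list above.
# completion = client.router.chat.completions.create(
#     model="...", messages=[...],
#     identity={"id": "user_123"},
# )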
@@ -1048,6 +1403,22 @@ class CreateChatCompletionThread(BaseModel):
     tags: Optional[List[str]] = None
     r"""Optional tags to differentiate or categorize threads"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class Inputs2TypedDict(TypedDict):
     key: str
@@ -1062,6 +1433,22 @@ class Inputs2(BaseModel):

     is_pii: Optional[bool] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["value", "is_pii"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 InputsTypedDict = TypeAliasType(
     "InputsTypedDict", Union[Dict[str, Any], List[Inputs2TypedDict]]
@@ -1073,25 +1460,43 @@ Inputs = TypeAliasType("Inputs", Union[Dict[str, Any], List[Inputs2]])
 r"""Values to replace in the prompt messages using {{variableName}} syntax"""


-
+CreateChatCompletionRouterChatCompletionsRequestRequestBodyType = Literal[
+    "exact_match",
+]


-class
+class CreateChatCompletionRouterChatCompletionsCacheTypedDict(TypedDict):
     r"""Cache configuration for the request."""

-    type:
+    type: CreateChatCompletionRouterChatCompletionsRequestRequestBodyType
     ttl: NotRequired[float]
     r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""


-class
+class CreateChatCompletionRouterChatCompletionsCache(BaseModel):
     r"""Cache configuration for the request."""

-    type:
+    type: CreateChatCompletionRouterChatCompletionsRequestRequestBodyType

     ttl: Optional[float] = 1800
     r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionSearchType = Literal[
     "vector_search",
@@ -1113,48 +1518,50 @@ class CreateChatCompletionOrExists(BaseModel):
     exists: bool


-
-    "
+CreateChatCompletionOrRouterChatCompletionsNinTypedDict = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsNinTypedDict", Union[str, float, bool]
 )


-
-    "
+CreateChatCompletionOrRouterChatCompletionsNin = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsNin", Union[str, float, bool]
 )


 class CreateChatCompletionOrNinTypedDict(TypedDict):
     r"""Not in"""

-    nin: List[
+    nin: List[CreateChatCompletionOrRouterChatCompletionsNinTypedDict]


 class CreateChatCompletionOrNin(BaseModel):
     r"""Not in"""

-    nin: List[
+    nin: List[CreateChatCompletionOrRouterChatCompletionsNin]


-
-    "
+CreateChatCompletionOrRouterChatCompletionsInTypedDict = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsInTypedDict", Union[str, float, bool]
 )


-
-    "
+CreateChatCompletionOrRouterChatCompletionsIn = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsIn", Union[str, float, bool]
 )


 class CreateChatCompletionOrInTypedDict(TypedDict):
     r"""In"""

-    in_: List[
+    in_: List[CreateChatCompletionOrRouterChatCompletionsInTypedDict]


 class CreateChatCompletionOrIn(BaseModel):
     r"""In"""

-    in_: Annotated[
+    in_: Annotated[
+        List[CreateChatCompletionOrRouterChatCompletionsIn], pydantic.Field(alias="in")
+    ]


 class CreateChatCompletionOrLteTypedDict(TypedDict):
@@ -1205,52 +1612,52 @@ class CreateChatCompletionOrGt(BaseModel):
|
|
|
1205
1612
|
gt: float
|
|
1206
1613
|
|
|
1207
1614
|
|
|
1208
|
-
|
|
1209
|
-
"
|
|
1615
|
+
CreateChatCompletionOrRouterChatCompletionsNeTypedDict = TypeAliasType(
|
|
1616
|
+
"CreateChatCompletionOrRouterChatCompletionsNeTypedDict", Union[str, float, bool]
|
|
1210
1617
|
)
|
|
1211
1618
|
|
|
1212
1619
|
|
|
1213
|
-
|
|
1214
|
-
"
|
|
1620
|
+
CreateChatCompletionOrRouterChatCompletionsNe = TypeAliasType(
|
|
1621
|
+
"CreateChatCompletionOrRouterChatCompletionsNe", Union[str, float, bool]
|
|
1215
1622
|
)
|
|
1216
1623
|
|
|
1217
1624
|
|
|
1218
1625
|
class CreateChatCompletionOrNeTypedDict(TypedDict):
|
|
1219
1626
|
r"""Not equal to"""
|
|
1220
1627
|
|
|
1221
|
-
ne:
|
|
1628
|
+
ne: CreateChatCompletionOrRouterChatCompletionsNeTypedDict
|
|
1222
1629
|
|
|
1223
1630
|
|
|
1224
1631
|
class CreateChatCompletionOrNe(BaseModel):
|
|
1225
1632
|
r"""Not equal to"""
|
|
1226
1633
|
|
|
1227
|
-
ne:
|
|
1634
|
+
ne: CreateChatCompletionOrRouterChatCompletionsNe
|
|
1228
1635
|
|
|
1229
1636
|
|
|
1230
|
-
|
|
1231
|
-
"
|
|
1637
|
+
CreateChatCompletionOrRouterChatCompletionsEqTypedDict = TypeAliasType(
|
|
1638
|
+
"CreateChatCompletionOrRouterChatCompletionsEqTypedDict", Union[str, float, bool]
|
|
1232
1639
|
)
|
|
1233
1640
|
|
|
1234
1641
|
|
|
1235
|
-
|
|
1236
|
-
"
|
|
1642
|
+
CreateChatCompletionOrRouterChatCompletionsEq = TypeAliasType(
|
|
1643
|
+
"CreateChatCompletionOrRouterChatCompletionsEq", Union[str, float, bool]
|
|
1237
1644
|
)
|
|
1238
1645
|
|
|
1239
1646
|
|
|
1240
1647
|
class CreateChatCompletionOrEqTypedDict(TypedDict):
|
|
1241
1648
|
r"""Equal to"""
|
|
1242
1649
|
|
|
1243
|
-
eq:
|
|
1650
|
+
eq: CreateChatCompletionOrRouterChatCompletionsEqTypedDict
|
|
1244
1651
|
|
|
1245
1652
|
|
|
1246
1653
|
class CreateChatCompletionOrEq(BaseModel):
|
|
1247
1654
|
r"""Equal to"""
|
|
1248
1655
|
|
|
1249
|
-
eq:
|
|
1656
|
+
eq: CreateChatCompletionOrRouterChatCompletionsEq
|
|
1250
1657
|
|
|
1251
1658
|
|
|
1252
|
-
|
|
1253
|
-
"
|
|
1659
|
+
CreateChatCompletionFilterByRouterChatCompletionsOrTypedDict = TypeAliasType(
|
|
1660
|
+
"CreateChatCompletionFilterByRouterChatCompletionsOrTypedDict",
|
|
1254
1661
|
Union[
|
|
1255
1662
|
CreateChatCompletionOrEqTypedDict,
|
|
1256
1663
|
CreateChatCompletionOrNeTypedDict,
|
|
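The pattern repeated throughout these hunks replaces bare union annotations with named aliases built via `TypeAliasType` (from `typing_extensions`), which gives each union a stable runtime name. A standalone sketch of the construct, independent of the SDK:

```python
# Standalone sketch of the TypeAliasType pattern used in the hunks above.
from typing import List, Union
from typing_extensions import TypeAliasType, TypedDict

Nin = TypeAliasType("Nin", Union[str, float, bool])

class NinFilter(TypedDict):
    nin: List[Nin]
```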
@@ -1265,8 +1672,8 @@ CreateChatCompletionFilterByRouterOrTypedDict = TypeAliasType(
 )
 
 
-
-"
+CreateChatCompletionFilterByRouterChatCompletionsOr = TypeAliasType(
+    "CreateChatCompletionFilterByRouterChatCompletionsOr",
     Union[
         CreateChatCompletionOrEq,
         CreateChatCompletionOrNe,
@@ -1284,14 +1691,14 @@ CreateChatCompletionFilterByRouterOr = TypeAliasType(
 class CreateChatCompletionFilterByOrTypedDict(TypedDict):
     r"""Or"""
 
-    or_: List[Dict[str,
+    or_: List[Dict[str, CreateChatCompletionFilterByRouterChatCompletionsOrTypedDict]]
 
 
 class CreateChatCompletionFilterByOr(BaseModel):
     r"""Or"""
 
     or_: Annotated[
-        List[Dict[str,
+        List[Dict[str, CreateChatCompletionFilterByRouterChatCompletionsOr]],
         pydantic.Field(alias="or"),
     ]
 
@@ -1308,48 +1715,50 @@ class CreateChatCompletionAndExists(BaseModel):
     exists: bool
 
 
-
-"
+CreateChatCompletionAndRouterChatCompletionsNinTypedDict = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsNinTypedDict", Union[str, float, bool]
 )
 
 
-
-"
+CreateChatCompletionAndRouterChatCompletionsNin = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsNin", Union[str, float, bool]
 )
 
 
 class CreateChatCompletionAndNinTypedDict(TypedDict):
     r"""Not in"""
 
-    nin: List[
+    nin: List[CreateChatCompletionAndRouterChatCompletionsNinTypedDict]
 
 
 class CreateChatCompletionAndNin(BaseModel):
     r"""Not in"""
 
-    nin: List[
+    nin: List[CreateChatCompletionAndRouterChatCompletionsNin]
 
 
-
-"
+CreateChatCompletionAndRouterChatCompletionsInTypedDict = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsInTypedDict", Union[str, float, bool]
 )
 
 
-
-"
+CreateChatCompletionAndRouterChatCompletionsIn = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsIn", Union[str, float, bool]
 )
 
 
 class CreateChatCompletionAndInTypedDict(TypedDict):
     r"""In"""
 
-    in_: List[
+    in_: List[CreateChatCompletionAndRouterChatCompletionsInTypedDict]
 
 
 class CreateChatCompletionAndIn(BaseModel):
     r"""In"""
 
-    in_: Annotated[
+    in_: Annotated[
+        List[CreateChatCompletionAndRouterChatCompletionsIn], pydantic.Field(alias="in")
+    ]
 
 
 class CreateChatCompletionAndLteTypedDict(TypedDict):
@@ -1400,52 +1809,52 @@ class CreateChatCompletionAndGt(BaseModel):
     gt: float
 
 
-
-"
+CreateChatCompletionAndRouterChatCompletionsNeTypedDict = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsNeTypedDict", Union[str, float, bool]
 )
 
 
-
-"
+CreateChatCompletionAndRouterChatCompletionsNe = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsNe", Union[str, float, bool]
 )
 
 
 class CreateChatCompletionAndNeTypedDict(TypedDict):
     r"""Not equal to"""
 
-    ne:
+    ne: CreateChatCompletionAndRouterChatCompletionsNeTypedDict
 
 
 class CreateChatCompletionAndNe(BaseModel):
     r"""Not equal to"""
 
-    ne:
+    ne: CreateChatCompletionAndRouterChatCompletionsNe
 
 
-
-"
+CreateChatCompletionAndRouterChatCompletionsEqTypedDict = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsEqTypedDict", Union[str, float, bool]
 )
 
 
-
-"
+CreateChatCompletionAndRouterChatCompletionsEq = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsEq", Union[str, float, bool]
 )
 
 
 class CreateChatCompletionAndEqTypedDict(TypedDict):
     r"""Equal to"""
 
-    eq:
+    eq: CreateChatCompletionAndRouterChatCompletionsEqTypedDict
 
 
 class CreateChatCompletionAndEq(BaseModel):
     r"""Equal to"""
 
-    eq:
+    eq: CreateChatCompletionAndRouterChatCompletionsEq
 
 
-
-"
+CreateChatCompletionFilterByRouterChatCompletionsAndTypedDict = TypeAliasType(
+    "CreateChatCompletionFilterByRouterChatCompletionsAndTypedDict",
     Union[
         CreateChatCompletionAndEqTypedDict,
         CreateChatCompletionAndNeTypedDict,
@@ -1460,8 +1869,8 @@ CreateChatCompletionFilterByRouterAndTypedDict = TypeAliasType(
 )
 
 
-
-"
+CreateChatCompletionFilterByRouterChatCompletionsAnd = TypeAliasType(
+    "CreateChatCompletionFilterByRouterChatCompletionsAnd",
     Union[
         CreateChatCompletionAndEq,
         CreateChatCompletionAndNe,
@@ -1479,14 +1888,14 @@ CreateChatCompletionFilterByRouterAnd = TypeAliasType(
 class CreateChatCompletionFilterByAndTypedDict(TypedDict):
     r"""And"""
 
-    and_: List[Dict[str,
+    and_: List[Dict[str, CreateChatCompletionFilterByRouterChatCompletionsAndTypedDict]]
 
 
 class CreateChatCompletionFilterByAnd(BaseModel):
     r"""And"""
 
     and_: Annotated[
-        List[Dict[str,
+        List[Dict[str, CreateChatCompletionFilterByRouterChatCompletionsAnd]],
         pydantic.Field(alias="and"),
     ]
 
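Taken together, the `Eq`/`Ne`/`In`/`Nin`/`Gt`/`Lte`/`Exists` operator models compose into `filter_by` expressions keyed by metadata field, grouped under the `or`/`and` aliases above. A plausible payload shape inferred from these models; the metadata keys and values are illustrative, not documented:

```python
# Illustrative filter_by payload; "status", "score", and "lang" are
# placeholder metadata keys. Operator names come from the models in this diff.
filter_by = {
    "and": [
        {"status": {"eq": "published"}},
        {"score": {"gt": 0.5}},
        {"lang": {"in": ["en", "nl"]}},
    ]
}
```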
@@ -1503,48 +1912,50 @@ class CreateChatCompletion1Exists(BaseModel):
     exists: bool
 
 
-
-"
+CreateChatCompletion1RouterChatCompletionsNinTypedDict = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsNinTypedDict", Union[str, float, bool]
 )
 
 
-
-"
+CreateChatCompletion1RouterChatCompletionsNin = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsNin", Union[str, float, bool]
 )
 
 
 class CreateChatCompletion1NinTypedDict(TypedDict):
     r"""Not in"""
 
-    nin: List[
+    nin: List[CreateChatCompletion1RouterChatCompletionsNinTypedDict]
 
 
 class CreateChatCompletion1Nin(BaseModel):
     r"""Not in"""
 
-    nin: List[
+    nin: List[CreateChatCompletion1RouterChatCompletionsNin]
 
 
-
-"
+CreateChatCompletion1RouterChatCompletionsInTypedDict = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsInTypedDict", Union[str, float, bool]
 )
 
 
-
-"
+CreateChatCompletion1RouterChatCompletionsIn = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsIn", Union[str, float, bool]
 )
 
 
 class CreateChatCompletion1InTypedDict(TypedDict):
     r"""In"""
 
-    in_: List[
+    in_: List[CreateChatCompletion1RouterChatCompletionsInTypedDict]
 
 
 class CreateChatCompletion1In(BaseModel):
     r"""In"""
 
-    in_: Annotated[
+    in_: Annotated[
+        List[CreateChatCompletion1RouterChatCompletionsIn], pydantic.Field(alias="in")
+    ]
 
 
 class CreateChatCompletion1LteTypedDict(TypedDict):
@@ -1595,48 +2006,48 @@ class CreateChatCompletion1Gt(BaseModel):
     gt: float
 
 
-
-"
+CreateChatCompletion1RouterChatCompletionsNeTypedDict = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsNeTypedDict", Union[str, float, bool]
 )
 
 
-
-"
+CreateChatCompletion1RouterChatCompletionsNe = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsNe", Union[str, float, bool]
 )
 
 
 class CreateChatCompletion1NeTypedDict(TypedDict):
     r"""Not equal to"""
 
-    ne:
+    ne: CreateChatCompletion1RouterChatCompletionsNeTypedDict
 
 
 class CreateChatCompletion1Ne(BaseModel):
     r"""Not equal to"""
 
-    ne:
+    ne: CreateChatCompletion1RouterChatCompletionsNe
 
 
-
-"
+CreateChatCompletion1RouterChatCompletionsEqTypedDict = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsEqTypedDict", Union[str, float, bool]
 )
 
 
-
-"
+CreateChatCompletion1RouterChatCompletionsEq = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsEq", Union[str, float, bool]
 )
 
 
 class CreateChatCompletion1EqTypedDict(TypedDict):
     r"""Equal to"""
 
-    eq:
+    eq: CreateChatCompletion1RouterChatCompletionsEqTypedDict
 
 
 class CreateChatCompletion1Eq(BaseModel):
     r"""Equal to"""
 
-    eq:
+    eq: CreateChatCompletion1RouterChatCompletionsEq
 
 
 CreateChatCompletionFilterBy1TypedDict = TypeAliasType(
@@ -1716,6 +2127,22 @@ class CreateChatCompletionSearchOptions(BaseModel):
     include_scores: Optional[bool] = None
     r"""Whether to include the scores in the chunk"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_vectors", "include_metadata", "include_scores"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CreateChatCompletionRerankConfigTypedDict(TypedDict):
     r"""Override the rerank configuration for this search. If not provided, will use the knowledge base configured rerank settings."""
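This `serialize_model` shape recurs through the rest of the file: the wrap serializer calls `handler(self)` for the default dump, then omits optional fields whose value is `None`, so defaults and unset values never appear on the wire. A self-contained sketch of the same behavior, assuming nothing beyond pydantic v2:

```python
# Self-contained sketch of the optional-field-dropping wrap serializer
# added throughout this file (pydantic v2 only; class name is invented).
from typing import Optional
from pydantic import BaseModel, model_serializer


class RerankConfigSketch(BaseModel):
    threshold: Optional[float] = None
    top_k: Optional[int] = 10

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = {"threshold", "top_k"}
        serialized = handler(self)  # default dict from pydantic
        return {
            k: v
            for k, v in serialized.items()
            if v is not None or k not in optional_fields
        }


print(RerankConfigSketch().model_dump())            # {'top_k': 10}
print(RerankConfigSketch(top_k=None).model_dump())  # {}
```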
@@ -1740,6 +2167,22 @@ class CreateChatCompletionRerankConfig(BaseModel):
     top_k: Optional[int] = 10
     r"""The number of top results to return after reranking. If not provided, will default to the knowledge base configured `top_k`."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["threshold", "top_k"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CreateChatCompletionAgenticRagConfigTypedDict(TypedDict):
     r"""Override the agentic RAG configuration for this search. If not provided, will use the knowledge base configured agentic RAG settings."""
@@ -1804,94 +2247,168 @@ class CreateChatCompletionKnowledgeBases(BaseModel):
     query: Optional[str] = None
     r"""The query to use to search the knowledge base. If not provided we will use the last user message from the messages of the requests"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "top_k",
+                "threshold",
+                "search_type",
+                "filter_by",
+                "search_options",
+                "rerank_config",
+                "agentic_rag_config",
+                "query",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
-
+CreateChatCompletionLoadBalancerRouterChatCompletionsType = Literal["weight_based",]
 
 
-class
-    type: LoadBalancerType
+class CreateChatCompletionLoadBalancerRouterChatCompletionsModelsTypedDict(TypedDict):
     model: str
     r"""Model identifier for load balancing"""
     weight: NotRequired[float]
     r"""Weight assigned to this model for load balancing"""
 
 
-class
-    type: LoadBalancerType
-
+class CreateChatCompletionLoadBalancerRouterChatCompletionsModels(BaseModel):
     model: str
     r"""Model identifier for load balancing"""
 
     weight: Optional[float] = 0.5
     r"""Weight assigned to this model for load balancing"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
 
-LoadBalancerTypedDict = LoadBalancer1TypedDict
 
+class CreateChatCompletionLoadBalancerRouterChatCompletions1TypedDict(TypedDict):
+    type: CreateChatCompletionLoadBalancerRouterChatCompletionsType
+    models: List[CreateChatCompletionLoadBalancerRouterChatCompletionsModelsTypedDict]
 
-LoadBalancer = LoadBalancer1
 
+class CreateChatCompletionLoadBalancerRouterChatCompletions1(BaseModel):
+    type: CreateChatCompletionLoadBalancerRouterChatCompletionsType
 
-
+    models: List[CreateChatCompletionLoadBalancerRouterChatCompletionsModels]
+
+
+CreateChatCompletionRouterChatCompletionsLoadBalancerTypedDict = (
+    CreateChatCompletionLoadBalancerRouterChatCompletions1TypedDict
+)
+r"""Array of models with weights for load balancing requests"""
+
+
+CreateChatCompletionRouterChatCompletionsLoadBalancer = (
+    CreateChatCompletionLoadBalancerRouterChatCompletions1
+)
+r"""Array of models with weights for load balancing requests"""
+
+
+class CreateChatCompletionRouterChatCompletionsTimeoutTypedDict(TypedDict):
     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
     call_timeout: float
     r"""Timeout value in milliseconds"""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsTimeout(BaseModel):
     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
     call_timeout: float
     r"""Timeout value in milliseconds"""
 
 
+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class CreateChatCompletionOrqTypedDict(TypedDict):
     r"""Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution."""
 
     name: NotRequired[str]
     r"""The name to display on the trace. If not specified, the default system name will be used."""
-    retry: NotRequired[
+    retry: NotRequired[CreateChatCompletionRouterChatCompletionsRetryTypedDict]
     r"""Retry configuration for the request"""
-    fallbacks: NotRequired[
+    fallbacks: NotRequired[
+        List[CreateChatCompletionRouterChatCompletionsFallbacksTypedDict]
+    ]
     r"""Array of fallback models to use if primary model fails"""
     prompt: NotRequired[PromptTypedDict]
     r"""Prompt configuration for the request"""
-    identity: NotRequired[
+    identity: NotRequired[PublicIdentityTypedDict]
     r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
-    contact: NotRequired[
+    contact: NotRequired[PublicContactTypedDict]
+    r"""@deprecated Use identity instead. Information about the contact making the request."""
     thread: NotRequired[CreateChatCompletionThreadTypedDict]
     r"""Thread information to group related requests"""
     inputs: NotRequired[InputsTypedDict]
     r"""Values to replace in the prompt messages using {{variableName}} syntax"""
-    cache: NotRequired[
+    cache: NotRequired[CreateChatCompletionRouterChatCompletionsCacheTypedDict]
     r"""Cache configuration for the request."""
     knowledge_bases: NotRequired[List[CreateChatCompletionKnowledgeBasesTypedDict]]
-    load_balancer: NotRequired[
+    load_balancer: NotRequired[
+        CreateChatCompletionRouterChatCompletionsLoadBalancerTypedDict
+    ]
     r"""Array of models with weights for load balancing requests"""
-    timeout: NotRequired[
+    timeout: NotRequired[CreateChatCompletionRouterChatCompletionsTimeoutTypedDict]
     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class CreateChatCompletionOrq(BaseModel):
     r"""Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution."""
 
     name: Optional[str] = None
     r"""The name to display on the trace. If not specified, the default system name will be used."""
 
-    retry: Optional[
+    retry: Optional[CreateChatCompletionRouterChatCompletionsRetry] = None
     r"""Retry configuration for the request"""
 
-    fallbacks: Optional[List[
+    fallbacks: Optional[List[CreateChatCompletionRouterChatCompletionsFallbacks]] = None
     r"""Array of fallback models to use if primary model fails"""
 
     prompt: Optional[Prompt] = None
     r"""Prompt configuration for the request"""
 
-    identity: Optional[
+    identity: Optional[PublicIdentity] = None
     r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
 
-    contact:
+    contact: Annotated[
+        Optional[PublicContact],
+        pydantic.Field(
+            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+        ),
+    ] = None
+    r"""@deprecated Use identity instead. Information about the contact making the request."""
 
     thread: Optional[CreateChatCompletionThread] = None
     r"""Thread information to group related requests"""
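The weight-based load balancer introduced here replaces the old `LoadBalancer1` aliases; each entry pairs a model with a `weight` that defaults to 0.5. A construction sketch using only names from this diff (the model slugs are placeholders, and the import path is assumed):

```python
# Sketch: weight-based load balancing across two models. Model slugs are
# placeholders; class names come from this diff.
from orq_ai_sdk.models.createchatcompletionop import (
    CreateChatCompletionLoadBalancerRouterChatCompletions1,
    CreateChatCompletionLoadBalancerRouterChatCompletionsModels,
)

lb = CreateChatCompletionLoadBalancerRouterChatCompletions1(
    type="weight_based",
    models=[
        CreateChatCompletionLoadBalancerRouterChatCompletionsModels(
            model="openai/gpt-4o", weight=0.8
        ),
        CreateChatCompletionLoadBalancerRouterChatCompletionsModels(
            model="anthropic/claude-sonnet-4", weight=0.2
        ),
    ],
)
```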
@@ -1899,17 +2416,50 @@ class CreateChatCompletionOrq(BaseModel):
     inputs: Optional[Inputs] = None
     r"""Values to replace in the prompt messages using {{variableName}} syntax"""
 
-    cache: Optional[
+    cache: Optional[CreateChatCompletionRouterChatCompletionsCache] = None
     r"""Cache configuration for the request."""
 
     knowledge_bases: Optional[List[CreateChatCompletionKnowledgeBases]] = None
 
-    load_balancer: Optional[
+    load_balancer: Optional[CreateChatCompletionRouterChatCompletionsLoadBalancer] = (
+        None
+    )
     r"""Array of models with weights for load balancing requests"""
 
-    timeout: Optional[
+    timeout: Optional[CreateChatCompletionRouterChatCompletionsTimeout] = None
     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "name",
+                "retry",
+                "fallbacks",
+                "prompt",
+                "identity",
+                "contact",
+                "thread",
+                "inputs",
+                "cache",
+                "knowledge_bases",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CreateChatCompletionRequestBodyTypedDict(TypedDict):
     messages: List[CreateChatCompletionMessagesTypedDict]
@@ -1974,6 +2524,16 @@ class CreateChatCompletionRequestBodyTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[CreateChatCompletionGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[CreateChatCompletionFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[CreateChatCompletionRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[CreateChatCompletionCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[CreateChatCompletionLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[CreateChatCompletionTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
     orq: NotRequired[CreateChatCompletionOrqTypedDict]
     r"""Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution."""
     stream: NotRequired[bool]
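With this hunk, `fallbacks`, `retry`, `cache`, `load_balancer`, and `timeout` become first-class request-body fields, while the `orq` envelope that previously carried them is deprecated (see the next hunk). A sketch of a request body using only shapes visible in this diff; the `retry` and `fallbacks` payloads are defined elsewhere in the file and omitted here, and the model slug is a placeholder:

```python
# Sketch of the new top-level routing fields. Only the cache and timeout
# shapes are visible in this diff; everything else is a placeholder.
body = {
    "model": "openai/gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}],
    "cache": {"type": "exact_match", "ttl": 600},   # seconds, max 259200
    "timeout": {"call_timeout": 30000},             # milliseconds
}
```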
@@ -2068,86 +2628,110 @@ class CreateChatCompletionRequestBody(BaseModel):
     guardrails: Optional[List[CreateChatCompletionGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
-
+    fallbacks: Optional[List[CreateChatCompletionFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[CreateChatCompletionRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[CreateChatCompletionCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[CreateChatCompletionLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[CreateChatCompletionTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    orq: Annotated[
+        Optional[CreateChatCompletionOrq],
+        pydantic.Field(
+            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+        ),
+    ] = None
     r"""Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution."""
 
     stream: Optional[bool] = False
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "metadata",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tools",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+                "orq",
+                "stream",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-
+CreateChatCompletionRouterChatCompletionsFinishReason = Literal[
     "stop",
     "length",
     "tool_calls",
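The rewritten serializer distinguishes optional fields, which are dropped when `None`, from nullable fields, which are kept whenever the caller explicitly set them, even to `None`, detected via `__pydantic_fields_set__`. A self-contained sketch of that rule (class name invented, pydantic v2 semantics):

```python
# Sketch of the nullable-field rule: an optional, nullable field is
# serialized when explicitly set, even when set to None.
from typing import Optional
from pydantic import BaseModel, model_serializer


class DeltaSketch(BaseModel):
    content: Optional[str] = None  # optional and nullable

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        nullable_fields = {"content"}
        serialized = handler(self)
        m = {}
        for name in type(self).model_fields:
            explicitly_set = name in self.__pydantic_fields_set__
            val = serialized.get(name)
            if val is not None or (name in nullable_fields and explicitly_set):
                m[name] = val
        return m


print(DeltaSketch().model_dump())              # {}
print(DeltaSketch(content=None).model_dump())  # {'content': None}
```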
@@ -2157,7 +2741,7 @@ CreateChatCompletionRouterFinishReason = Literal[
 r"""The reason the model stopped generating tokens."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponseTopLogprobsTypedDict(TypedDict):
     token: str
     r"""The token."""
     logprob: float
@@ -2166,7 +2750,7 @@ class CreateChatCompletionRouterResponseTopLogprobsTypedDict(TypedDict):
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponseTopLogprobs(BaseModel):
     token: str
     r"""The token."""
 
@@ -2178,47 +2762,33 @@ class CreateChatCompletionRouterResponseTopLogprobs(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
 
-            if val
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsContentTypedDict(TypedDict):
     token: str
     r"""The token."""
     logprob: float
     r"""The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value -9999.0 is used to signify that the token is very unlikely."""
     bytes_: Nullable[List[float]]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
-    top_logprobs: List[
+    top_logprobs: List[
+        CreateChatCompletionRouterChatCompletionsResponseTopLogprobsTypedDict
+    ]
     r"""List of the most likely tokens and their log probability, at this token position."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsContent(BaseModel):
     token: str
     r"""The token."""
 
@@ -2228,41 +2798,27 @@ class CreateChatCompletionRouterContent(BaseModel):
     bytes_: Annotated[Nullable[List[float]], pydantic.Field(alias="bytes")]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
 
-    top_logprobs: List[
+    top_logprobs: List[CreateChatCompletionRouterChatCompletionsResponseTopLogprobs]
     r"""List of the most likely tokens and their log probability, at this token position."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
 
-
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponse200TopLogprobsTypedDict(
+    TypedDict
+):
     token: str
     r"""The token."""
     logprob: float
@@ -2271,7 +2827,7 @@ class CreateChatCompletionRouterResponse200TopLogprobsTypedDict(TypedDict):
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponse200TopLogprobs(BaseModel):
     token: str
     r"""The token."""
 
@@ -2283,47 +2839,33 @@ class CreateChatCompletionRouterResponse200TopLogprobs(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
 
-
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsRefusalTypedDict(TypedDict):
     token: str
     r"""The token."""
     logprob: float
     r"""The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value -9999.0 is used to signify that the token is very unlikely."""
     bytes_: Nullable[List[float]]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
-    top_logprobs: List[
+    top_logprobs: List[
+        CreateChatCompletionRouterChatCompletionsResponse200TopLogprobsTypedDict
+    ]
     r"""List of the most likely tokens and their log probability, at this token position."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsRefusal(BaseModel):
     token: str
     r"""The token."""
 
@@ -2333,140 +2875,142 @@ class CreateChatCompletionRouterRefusal(BaseModel):
     bytes_: Annotated[Nullable[List[float]], pydantic.Field(alias="bytes")]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
 
-    top_logprobs: List[
+    top_logprobs: List[CreateChatCompletionRouterChatCompletionsResponse200TopLogprobs]
     r"""List of the most likely tokens and their log probability, at this token position."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
 
-            if val
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsLogprobsTypedDict(TypedDict):
     r"""Log probability information for the choice."""
 
-    content: Nullable[List[
+    content: Nullable[List[CreateChatCompletionRouterChatCompletionsContentTypedDict]]
     r"""A list of message content tokens with log probability information."""
-    refusal: Nullable[List[
+    refusal: Nullable[List[CreateChatCompletionRouterChatCompletionsRefusalTypedDict]]
     r"""A list of message refusal tokens with log probability information."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsLogprobs(BaseModel):
     r"""Log probability information for the choice."""
 
-    content: Nullable[List[
+    content: Nullable[List[CreateChatCompletionRouterChatCompletionsContent]]
     r"""A list of message content tokens with log probability information."""
 
-    refusal: Nullable[List[
+    refusal: Nullable[List[CreateChatCompletionRouterChatCompletionsRefusal]]
     r"""A list of message refusal tokens with log probability information."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["content", "refusal"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
 
-            if val
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
 
 
-
+CreateChatCompletionRouterChatCompletionsResponse200Type = Literal["function",]
 r"""The type of the tool. Currently, only `function` is supported."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponseFunctionTypedDict(TypedDict):
     name: NotRequired[str]
     r"""The name of the function."""
     arguments: NotRequired[str]
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponseFunction(BaseModel):
     name: Optional[str] = None
     r"""The name of the function."""
 
     arguments: Optional[str] = None
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
-
+        return m
+
+
+class CreateChatCompletionRouterChatCompletionsToolCallsTypedDict(TypedDict):
     index: NotRequired[float]
     r"""The index of the tool call."""
     id: NotRequired[str]
     r"""The ID of the tool call."""
-    type: NotRequired[
+    type: NotRequired[CreateChatCompletionRouterChatCompletionsResponse200Type]
     r"""The type of the tool. Currently, only `function` is supported."""
-    function: NotRequired[
+    function: NotRequired[
+        CreateChatCompletionRouterChatCompletionsResponseFunctionTypedDict
+    ]
     thought_signature: NotRequired[str]
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsToolCalls(BaseModel):
     index: Optional[float] = None
     r"""The index of the tool call."""
 
     id: Optional[str] = None
     r"""The ID of the tool call."""
 
-    type: Optional[
+    type: Optional[CreateChatCompletionRouterChatCompletionsResponse200Type] = None
     r"""The type of the tool. Currently, only `function` is supported."""
 
-    function: Optional[
+    function: Optional[CreateChatCompletionRouterChatCompletionsResponseFunction] = None
 
     thought_signature: Optional[str] = None
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["index", "id", "type", "function", "thought_signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
 
-
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+CreateChatCompletionRouterChatCompletionsRole = Literal["assistant",]
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponseAudioTypedDict(TypedDict):
     r"""Audio response data in streaming mode."""
 
     id: NotRequired[str]
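The streaming `Delta`/`ToolCalls` models defined above deliver function-call fragments keyed by `index`, with `arguments` arriving incrementally across chunks. One plausible way a consumer might stitch them back together; this accumulation loop is an assumption about usage, not SDK-provided code:

```python
# Sketch: accumulate streamed tool-call fragments by index. The dict shapes
# mirror CreateChatCompletionRouterChatCompletionsToolCallsTypedDict.
from collections import defaultdict
from typing import Any, Dict

calls: Dict[int, Dict[str, Any]] = defaultdict(
    lambda: {"id": None, "name": None, "arguments": ""}
)

def apply_tool_call_deltas(tool_calls) -> None:
    """Merge one chunk's tool_calls list into the accumulated calls."""
    for tc in tool_calls or []:
        slot = calls[int(tc.get("index") or 0)]
        if tc.get("id"):
            slot["id"] = tc["id"]
        fn = tc.get("function") or {}
        if fn.get("name"):
            slot["name"] = fn["name"]
        slot["arguments"] += fn.get("arguments") or ""
```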
@@ -2493,15 +3053,19 @@ class DeltaTypedDict(TypedDict):
     content: NotRequired[Nullable[str]]
     r"""The contents of the chunk message."""
     refusal: NotRequired[Nullable[str]]
-    tool_calls: NotRequired[
-
+    tool_calls: NotRequired[
+        List[CreateChatCompletionRouterChatCompletionsToolCallsTypedDict]
+    ]
+    role: NotRequired[CreateChatCompletionRouterChatCompletionsRole]
     reasoning: NotRequired[str]
     r"""Internal thought process of the model"""
     reasoning_signature: NotRequired[str]
     r"""The signature holds a cryptographic token which verifies that the thinking block was generated by the model, and is verified when thinking is part of a multiturn conversation. This value should not be modified and should always be sent to the API when the reasoning is redacted. Currently only supported by `Anthropic`."""
     redacted_reasoning: NotRequired[str]
     r"""Occasionally the model's internal reasoning will be flagged by the safety systems of the provider. When this occurs, the provider will encrypt the reasoning. These redacted reasoning is decrypted when passed back to the API, allowing the model to continue its response without losing context."""
-    audio: NotRequired[
+    audio: NotRequired[
+        Nullable[CreateChatCompletionRouterChatCompletionsResponseAudioTypedDict]
+    ]
     r"""Audio response data in streaming mode."""
 
 
@@ -2513,9 +3077,11 @@ class Delta(BaseModel):
 
     refusal: OptionalNullable[str] = UNSET
 
-    tool_calls: Optional[List[
+    tool_calls: Optional[List[CreateChatCompletionRouterChatCompletionsToolCalls]] = (
+        None
+    )
 
-    role: Optional[
+    role: Optional[CreateChatCompletionRouterChatCompletionsRole] = None
 
     reasoning: Optional[str] = None
     r"""Internal thought process of the model"""
@@ -2526,62 +3092,63 @@ class Delta(BaseModel):
|
|
|
2526
3092
|
redacted_reasoning: Optional[str] = None
|
|
2527
3093
|
r"""Occasionally the model's internal reasoning will be flagged by the safety systems of the provider. When this occurs, the provider will encrypt the reasoning. These redacted reasoning is decrypted when passed back to the API, allowing the model to continue its response without losing context."""
|
|
2528
3094
|
|
|
2529
|
-
audio: OptionalNullable[
|
|
3095
|
+
audio: OptionalNullable[CreateChatCompletionRouterChatCompletionsResponseAudio] = (
|
|
3096
|
+
UNSET
|
|
3097
|
+
)
|
|
2530
3098
|
r"""Audio response data in streaming mode."""
|
|
2531
3099
|
|
|
2532
3100
|
@model_serializer(mode="wrap")
|
|
2533
3101
|
def serialize_model(self, handler):
|
|
2534
|
-
optional_fields =
|
|
2535
|
-
|
|
2536
|
-
|
|
2537
|
-
|
|
2538
|
-
|
|
2539
|
-
|
|
2540
|
-
|
|
2541
|
-
|
|
2542
|
-
|
|
2543
|
-
|
|
2544
|
-
|
|
2545
|
-
|
|
2546
|
-
|
|
3102
|
+
optional_fields = set(
|
|
3103
|
+
[
|
|
3104
|
+
"content",
|
|
3105
|
+
"refusal",
|
|
3106
|
+
"tool_calls",
|
|
3107
|
+
"role",
|
|
3108
|
+
"reasoning",
|
|
3109
|
+
"reasoning_signature",
|
|
3110
|
+
"redacted_reasoning",
|
|
3111
|
+
"audio",
|
|
3112
|
+
]
|
|
3113
|
+
)
|
|
3114
|
+
nullable_fields = set(["content", "refusal", "audio"])
|
|
2547
3115
|
serialized = handler(self)
|
|
2548
|
-
|
|
2549
3116
|
m = {}
|
|
2550
3117
|
|
|
2551
3118
|
for n, f in type(self).model_fields.items():
|
|
2552
3119
|
k = f.alias or n
|
|
2553
3120
|
val = serialized.get(k)
|
|
2554
|
-
|
|
2555
|
-
|
|
2556
|
-
|
|
2557
|
-
|
|
2558
|
-
|
|
2559
|
-
|
|
2560
|
-
|
|
2561
|
-
|
|
2562
|
-
|
|
2563
|
-
|
|
2564
|
-
|
|
2565
|
-
|
|
2566
|
-
):
|
|
2567
|
-
m[k] = val
|
|
3121
|
+
is_nullable_and_explicitly_set = (
|
|
3122
|
+
k in nullable_fields
|
|
3123
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
3124
|
+
)
|
|
3125
|
+
|
|
3126
|
+
if val != UNSET_SENTINEL:
|
|
3127
|
+
if (
|
|
3128
|
+
val is not None
|
|
3129
|
+
or k not in optional_fields
|
|
3130
|
+
or is_nullable_and_explicitly_set
|
|
3131
|
+
):
|
|
3132
|
+
m[k] = val
|
|
2568
3133
|
|
|
2569
3134
|
return m
|
|
2570
3135
|
|
|
2571
3136
|
|
|
2572
|
-
class
|
|
2573
|
-
finish_reason: Nullable[
|
|
3137
|
+
class CreateChatCompletionRouterChatCompletionsChoicesTypedDict(TypedDict):
|
|
3138
|
+
finish_reason: Nullable[CreateChatCompletionRouterChatCompletionsFinishReason]
|
|
2574
3139
|
r"""The reason the model stopped generating tokens."""
|
|
2575
3140
|
delta: DeltaTypedDict
|
|
2576
3141
|
r"""A chat completion delta generated by streamed model responses."""
|
|
2577
3142
|
index: NotRequired[float]
|
|
2578
3143
|
r"""The index of the choice in the list of choices."""
|
|
2579
|
-
logprobs: NotRequired[
|
|
3144
|
+
logprobs: NotRequired[
|
|
3145
|
+
Nullable[CreateChatCompletionRouterChatCompletionsLogprobsTypedDict]
|
|
3146
|
+
]
|
|
2580
3147
|
r"""Log probability information for the choice."""
|
|
2581
3148
|
|
|
2582
3149
|
|
|
2583
|
-
class
|
|
2584
|
-
finish_reason: Nullable[
|
|
3150
|
+
class CreateChatCompletionRouterChatCompletionsChoices(BaseModel):
|
|
3151
|
+
finish_reason: Nullable[CreateChatCompletionRouterChatCompletionsFinishReason]
|
|
2585
3152
|
r"""The reason the model stopped generating tokens."""
|
|
2586
3153
|
|
|
2587
3154
|
delta: Delta
|
|
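The rewritten `Delta.serialize_model` above is the pattern this release applies throughout the file: the field lists become sets, the old `null_default_fields` bookkeeping disappears, and a single `is_nullable_and_explicitly_set` test decides whether an explicit `None` survives serialization. A minimal self-contained sketch of that rule, using a stand-in model and sentinel rather than the SDK's actual `UNSET`/`UNSET_SENTINEL` machinery:

```python
from typing import Optional

from pydantic import BaseModel, model_serializer

UNSET_SENTINEL = "~?~unset~?~"  # stand-in for the SDK's sentinel value


class DeltaSketch(BaseModel):
    refusal: Optional[str] = None    # optional and nullable
    reasoning: Optional[str] = None  # optional only

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["refusal", "reasoning"])
        nullable_fields = set(["refusal"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # truthy only when the field is nullable AND the caller set it
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and self.__pydantic_fields_set__.intersection({n})
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val
        return m


print(DeltaSketch().model_dump())              # {} -- unset defaults are dropped
print(DeltaSketch(refusal=None).model_dump())  # {'refusal': None} -- explicit null kept
```

The practical effect is that callers can distinguish "clear this value" (an explicit `null` in the payload) from "leave it alone" (the key is omitted), without the extra `null_default_fields` list the old generator emitted.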
@@ -2590,48 +3157,45 @@ class CreateChatCompletionRouterChoices(BaseModel):
     index: Optional[float] = 0
     r"""The index of the choice in the list of choices."""

-    logprobs: OptionalNullable[
+    logprobs: OptionalNullable[CreateChatCompletionRouterChatCompletionsLogprobs] = (
+        UNSET
+    )
     r"""Log probability information for the choice."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["index", "logprobs"]
-        nullable_fields = ["finish_reason", "logprobs"]
-        null_default_fields = []
-
+        optional_fields = set(["index", "logprobs"])
+        nullable_fields = set(["finish_reason", "logprobs"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-class
+class CreateChatCompletionRouterChatCompletionsPromptTokensDetailsTypedDict(TypedDict):
     cached_tokens: NotRequired[Nullable[int]]
     cache_creation_tokens: NotRequired[Nullable[int]]
     audio_tokens: NotRequired[Nullable[int]]
     r"""The number of audio input tokens consumed by the request."""


-class
+class CreateChatCompletionRouterChatCompletionsPromptTokensDetails(BaseModel):
     cached_tokens: OptionalNullable[int] = UNSET

     cache_creation_tokens: OptionalNullable[int] = UNSET
@@ -2641,36 +3205,37 @@ class CreateChatCompletionRouterPromptTokensDetails(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
+        optional_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
+        nullable_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-class
+class CreateChatCompletionRouterChatCompletionsCompletionTokensDetailsTypedDict(
+    TypedDict
+):
     reasoning_tokens: NotRequired[Nullable[float]]
     accepted_prediction_tokens: NotRequired[Nullable[float]]
     rejected_prediction_tokens: NotRequired[Nullable[float]]
@@ -2678,7 +3243,7 @@ class CreateChatCompletionRouterCompletionTokensDetailsTypedDict(TypedDict):
     r"""The number of audio output tokens produced by the response."""


-class
+class CreateChatCompletionRouterChatCompletionsCompletionTokensDetails(BaseModel):
     reasoning_tokens: OptionalNullable[float] = UNSET

     accepted_prediction_tokens: OptionalNullable[float] = UNSET
@@ -2690,46 +3255,45 @@ class CreateChatCompletionRouterCompletionTokensDetails(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-class
+class CreateChatCompletionRouterChatCompletionsUsageTypedDict(TypedDict):
     r"""Usage statistics for the completion request."""

     completion_tokens: NotRequired[float]
@@ -2739,14 +3303,16 @@ class CreateChatCompletionRouterUsageTypedDict(TypedDict):
     total_tokens: NotRequired[float]
     r"""Total number of tokens used in the request (prompt + completion)."""
     prompt_tokens_details: NotRequired[
-        Nullable[
+        Nullable[CreateChatCompletionRouterChatCompletionsPromptTokensDetailsTypedDict]
     ]
     completion_tokens_details: NotRequired[
-        Nullable[
+        Nullable[
+            CreateChatCompletionRouterChatCompletionsCompletionTokensDetailsTypedDict
+        ]
     ]


-class
+class CreateChatCompletionRouterChatCompletionsUsage(BaseModel):
     r"""Usage statistics for the completion request."""

     completion_tokens: Optional[float] = None
@@ -2759,51 +3325,48 @@ class CreateChatCompletionRouterUsage(BaseModel):
     r"""Total number of tokens used in the request (prompt + completion)."""

     prompt_tokens_details: OptionalNullable[
-
+        CreateChatCompletionRouterChatCompletionsPromptTokensDetails
     ] = UNSET

     completion_tokens_details: OptionalNullable[
-
+        CreateChatCompletionRouterChatCompletionsCompletionTokensDetails
     ] = UNSET

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "completion_tokens",
+                "prompt_tokens",
+                "total_tokens",
+                "prompt_tokens_details",
+                "completion_tokens_details",
+            ]
+        )
+        nullable_fields = set(["prompt_tokens_details", "completion_tokens_details"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-
+CreateChatCompletionRouterChatCompletionsObject = Literal["chat.completion.chunk",]


 class CreateChatCompletionDataTypedDict(TypedDict):
@@ -2811,16 +3374,18 @@ class CreateChatCompletionDataTypedDict(TypedDict):

     id: str
     r"""A unique identifier for the chat completion."""
-    choices: List[
+    choices: List[CreateChatCompletionRouterChatCompletionsChoicesTypedDict]
     r"""A list of chat completion choices. Can contain more than one elements if n is greater than 1. Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}."""
     created: float
     r"""The Unix timestamp (in seconds) of when the chat completion was created."""
     model: str
     r"""The model used for the chat completion."""
-    object:
+    object: CreateChatCompletionRouterChatCompletionsObject
     system_fingerprint: NotRequired[Nullable[str]]
     r"""This fingerprint represents the backend configuration that the model runs with."""
-    usage: NotRequired[
+    usage: NotRequired[
+        Nullable[CreateChatCompletionRouterChatCompletionsUsageTypedDict]
+    ]
     r"""Usage statistics for the completion request."""


@@ -2830,7 +3395,7 @@ class CreateChatCompletionData(BaseModel):
     id: str
     r"""A unique identifier for the chat completion."""

-    choices: List[
+    choices: List[CreateChatCompletionRouterChatCompletionsChoices]
     r"""A list of chat completion choices. Can contain more than one elements if n is greater than 1. Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}."""

     created: float
@@ -2839,58 +3404,69 @@ class CreateChatCompletionData(BaseModel):
     model: str
     r"""The model used for the chat completion."""

-    object:
+    object: CreateChatCompletionRouterChatCompletionsObject

     system_fingerprint: OptionalNullable[str] = UNSET
     r"""This fingerprint represents the backend configuration that the model runs with."""

-    usage: OptionalNullable[
+    usage: OptionalNullable[CreateChatCompletionRouterChatCompletionsUsage] = UNSET
     r"""Usage statistics for the completion request."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["system_fingerprint", "usage"]
-        nullable_fields = ["system_fingerprint", "usage"]
-        null_default_fields = []
-
+        optional_fields = set(["system_fingerprint", "usage"])
+        nullable_fields = set(["system_fingerprint", "usage"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-class
+class CreateChatCompletionRouterChatCompletionsResponseBodyTypedDict(TypedDict):
     r"""Represents a streamed chunk of a chat completion response returned by model, based on the provided input."""

     data: NotRequired[CreateChatCompletionDataTypedDict]
     r"""Represents a streamed chunk of a chat completion response returned by model, based on the provided input."""


-class
+class CreateChatCompletionRouterChatCompletionsResponseBody(BaseModel):
     r"""Represents a streamed chunk of a chat completion response returned by model, based on the provided input."""

     data: Optional[CreateChatCompletionData] = None
     r"""Represents a streamed chunk of a chat completion response returned by model, based on the provided input."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["data"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionFinishReason = Literal[
     "stop",
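Alongside the set-based rewrite, models that previously had no custom serializer, such as `CreateChatCompletionRouterChatCompletionsResponseBody` above, gain a lighter variant with only `optional_fields`: optional members left at `None` are omitted instead of being emitted as `null`. A hedged usage sketch, assuming the class is re-exported from `orq_ai_sdk.models` (which the greatly enlarged `models/__init__.py` in this release suggests):

```python
from orq_ai_sdk.models import CreateChatCompletionRouterChatCompletionsResponseBody

# "data" is optional and defaults to None, so the new serializer drops it
body = CreateChatCompletionRouterChatCompletionsResponseBody()
assert body.model_dump() == {}  # no "data": null in the output
```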
@@ -2902,29 +3478,45 @@ CreateChatCompletionFinishReason = Literal[
 r"""The reason the model stopped generating tokens."""


-
+CreateChatCompletionRouterChatCompletionsResponseType = Literal["function",]


-class
+class CreateChatCompletionRouterChatCompletionsFunctionTypedDict(TypedDict):
     name: NotRequired[str]
     r"""The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
     arguments: NotRequired[str]
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""


-class
+class CreateChatCompletionRouterChatCompletionsFunction(BaseModel):
     name: Optional[str] = None
     r"""The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""

     arguments: Optional[str] = None
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionToolCallsTypedDict(TypedDict):
     index: NotRequired[float]
     id: NotRequired[str]
-    type: NotRequired[
-    function: NotRequired[
+    type: NotRequired[CreateChatCompletionRouterChatCompletionsResponseType]
+    function: NotRequired[CreateChatCompletionRouterChatCompletionsFunctionTypedDict]
     thought_signature: NotRequired[str]
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""

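The `arguments` docstring above carries a practical warning: function-call arguments arrive as model-generated JSON that may be malformed or include parameters your schema never defined. A small defensive parser, independent of the SDK, illustrating the validation the docstring asks for (the function name and signature here are illustrative, not part of the package):

```python
import json


def parse_arguments(raw: str, allowed: set[str]) -> dict:
    """Parse model-generated function-call arguments defensively."""
    try:
        args = json.loads(raw)
    except json.JSONDecodeError:
        return {}  # the model emitted invalid JSON
    if not isinstance(args, dict):
        return {}  # valid JSON, but not an object
    # drop hallucinated parameters the function schema does not define
    return {k: v for k, v in args.items() if k in allowed}


print(parse_arguments('{"city": "Oslo", "bogus": 1}', {"city"}))  # {'city': 'Oslo'}
```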
@@ -2934,18 +3526,34 @@ class CreateChatCompletionToolCalls(BaseModel):

     id: Optional[str] = None

-    type: Optional[
+    type: Optional[CreateChatCompletionRouterChatCompletionsResponseType] = None

-    function: Optional[
+    function: Optional[CreateChatCompletionRouterChatCompletionsFunction] = None

     thought_signature: Optional[str] = None
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["index", "id", "type", "function", "thought_signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 CreateChatCompletionRole = Literal["assistant",]


-class
+class CreateChatCompletionRouterChatCompletionsAudioTypedDict(TypedDict):
     r"""If the audio output modality is requested, this object contains data about the audio response from the model."""

     id: str
@@ -2954,7 +3562,7 @@ class CreateChatCompletionRouterAudioTypedDict(TypedDict):
     transcript: str


-class
+class CreateChatCompletionRouterChatCompletionsAudio(BaseModel):
     r"""If the audio output modality is requested, this object contains data about the audio response from the model."""

     id: str
@@ -2979,7 +3587,9 @@ class CreateChatCompletionMessageTypedDict(TypedDict):
     r"""The signature holds a cryptographic token which verifies that the thinking block was generated by the model, and is verified when thinking is part of a multiturn conversation. This value should not be modified and should always be sent to the API when the reasoning is redacted. Currently only supported by `Anthropic`."""
     redacted_reasoning: NotRequired[str]
     r"""Occasionally the model's internal reasoning will be flagged by the safety systems of the provider. When this occurs, the provider will encrypt the reasoning. These redacted reasoning is decrypted when passed back to the API, allowing the model to continue its response without losing context."""
-    audio: NotRequired[
+    audio: NotRequired[
+        Nullable[CreateChatCompletionRouterChatCompletionsAudioTypedDict]
+    ]
     r"""If the audio output modality is requested, this object contains data about the audio response from the model."""


@@ -3003,51 +3613,44 @@ class CreateChatCompletionMessage(BaseModel):
     redacted_reasoning: Optional[str] = None
     r"""Occasionally the model's internal reasoning will be flagged by the safety systems of the provider. When this occurs, the provider will encrypt the reasoning. These redacted reasoning is decrypted when passed back to the API, allowing the model to continue its response without losing context."""

-    audio: OptionalNullable[
+    audio: OptionalNullable[CreateChatCompletionRouterChatCompletionsAudio] = UNSET
     r"""If the audio output modality is requested, this object contains data about the audio response from the model."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-            "reasoning",
-
-            "audio",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "content",
+                "refusal",
+                "tool_calls",
+                "role",
+                "reasoning",
+                "reasoning_signature",
+                "redacted_reasoning",
+                "audio",
+            ]
+        )
+        nullable_fields = set(
+            ["content", "refusal", "reasoning", "reasoning_signature", "audio"]
+        )
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -3073,30 +3676,14 @@ class CreateChatCompletionTopLogprobs(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member

-            if val
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m
@@ -3128,36 +3715,20 @@ class CreateChatCompletionContent(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member

-            if val
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m


-class
+class CreateChatCompletionRouterChatCompletionsTopLogprobsTypedDict(TypedDict):
     token: str
     r"""The token."""
     logprob: float
@@ -3166,7 +3737,7 @@ class CreateChatCompletionRouterTopLogprobsTypedDict(TypedDict):
     r"""A list of integers representing the UTF-8 bytes representation of the token."""


-class
+class CreateChatCompletionRouterChatCompletionsTopLogprobs(BaseModel):
     token: str
     r"""The token."""

@@ -3178,30 +3749,14 @@ class CreateChatCompletionRouterTopLogprobs(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member

-            if val
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m
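For models whose members are all required, like the `token`/`logprob`/`bytes` trio above, the rewrite goes the other way: the `serialized.pop`/`optional_nullable`/`is_set` scaffolding is deleted and the serializer collapses to a bare sentinel check, since there are no optional fields to filter. A minimal sketch of the collapsed behavior (stand-in sentinel again; `bytes_` mirrors the SDK's aliased field): required-nullable fields still serialize an explicit null.

```python
from typing import List, Optional

from pydantic import BaseModel, model_serializer

UNSET_SENTINEL = "~?~unset~?~"  # stand-in for the SDK's sentinel value


class TopLogprobSketch(BaseModel):
    token: str
    logprob: float
    bytes_: Optional[List[float]]  # required, but may be null

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        serialized = handler(self)
        m = {}
        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            if val != UNSET_SENTINEL:
                m[k] = val  # every non-sentinel value is kept, nulls included
        return m


print(TopLogprobSketch(token="hi", logprob=-0.1, bytes_=None).model_dump())
# {'token': 'hi', 'logprob': -0.1, 'bytes_': None}
```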
@@ -3214,7 +3769,7 @@ class CreateChatCompletionRefusalTypedDict(TypedDict):
     r"""The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value -9999.0 is used to signify that the token is very unlikely."""
     bytes_: Nullable[List[float]]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
-    top_logprobs: List[
+    top_logprobs: List[CreateChatCompletionRouterChatCompletionsTopLogprobsTypedDict]
     r"""List of the most likely tokens and their log probability, at this token position."""


@@ -3228,35 +3783,19 @@ class CreateChatCompletionRefusal(BaseModel):
     bytes_: Annotated[Nullable[List[float]], pydantic.Field(alias="bytes")]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""

-    top_logprobs: List[
+    top_logprobs: List[CreateChatCompletionRouterChatCompletionsTopLogprobs]
     r"""List of the most likely tokens and their log probability, at this token position."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member

-            if val
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m
@@ -3282,30 +3821,14 @@ class CreateChatCompletionLogprobs(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["content", "refusal"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member

-            if val
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m
@@ -3337,31 +3860,26 @@ class CreateChatCompletionChoices(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["index", "logprobs"]
-        nullable_fields = ["finish_reason", "logprobs"]
-        null_default_fields = []
-
+        optional_fields = set(["index", "logprobs"])
+        nullable_fields = set(["finish_reason", "logprobs"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -3383,31 +3901,30 @@ class CreateChatCompletionPromptTokensDetails(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
+        optional_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
+        nullable_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -3432,41 +3949,40 @@ class CreateChatCompletionCompletionTokensDetails(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -3510,37 +4026,34 @@ class CreateChatCompletionUsage(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "completion_tokens",
+                "prompt_tokens",
+                "total_tokens",
+                "prompt_tokens_details",
+                "completion_tokens_details",
+            ]
+        )
+        nullable_fields = set(["prompt_tokens_details", "completion_tokens_details"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -3591,31 +4104,26 @@ class CreateChatCompletionResponseBody(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["system_fingerprint", "usage"]
-        nullable_fields = ["system_fingerprint", "usage"]
-        null_default_fields = []
-
+        optional_fields = set(["system_fingerprint", "usage"])
+        nullable_fields = set(["system_fingerprint", "usage"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -3625,9 +4133,11 @@ CreateChatCompletionResponseTypedDict = TypeAliasType(
     Union[
         CreateChatCompletionResponseBodyTypedDict,
         Union[
-            eventstreaming.EventStream[
+            eventstreaming.EventStream[
+                CreateChatCompletionRouterChatCompletionsResponseBodyTypedDict
+            ],
             eventstreaming.EventStreamAsync[
-
+                CreateChatCompletionRouterChatCompletionsResponseBodyTypedDict
             ],
         ],
     ],
@@ -3639,8 +4149,12 @@ CreateChatCompletionResponse = TypeAliasType(
     Union[
         CreateChatCompletionResponseBody,
         Union[
-            eventstreaming.EventStream[
-
+            eventstreaming.EventStream[
+                CreateChatCompletionRouterChatCompletionsResponseBody
+            ],
+            eventstreaming.EventStreamAsync[
+                CreateChatCompletionRouterChatCompletionsResponseBody
+            ],
         ],
     ],
 )