orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl
This diff reflects the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/agents.py +186 -186
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5746 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1945 -383
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1381 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +407 -128
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2759 -1251
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1696 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1679 -230
- orq_ai_sdk/models/getpromptversionop.py +1676 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +805 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1690 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1462 -196
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1439 -194
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1968 -397
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2854 -1448
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +364 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
- orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
|
@@ -11,6 +11,7 @@ from .imagecontentpartschema import (
|
|
|
11
11
|
ImageContentPartSchemaTypedDict,
|
|
12
12
|
)
|
|
13
13
|
from .publiccontact import PublicContact, PublicContactTypedDict
|
|
14
|
+
from .publicidentity import PublicIdentity, PublicIdentityTypedDict
|
|
14
15
|
from .reasoningpartschema import ReasoningPartSchema, ReasoningPartSchemaTypedDict
|
|
15
16
|
from .redactedreasoningpartschema import (
|
|
16
17
|
RedactedReasoningPartSchema,
|
|
@@ -46,31 +47,37 @@ from typing_extensions import (
|
|
|
46
47
|
)
|
|
47
48
|
|
|
48
49
|
|
|
49
|
-
|
|
50
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Role = Literal[
|
|
51
|
+
"tool",
|
|
52
|
+
]
|
|
50
53
|
r"""The role of the messages author, in this case tool."""
|
|
51
54
|
|
|
52
55
|
|
|
53
|
-
|
|
56
|
+
CreateChatCompletionContentRouterChatCompletionsRequest2TypedDict = (
|
|
57
|
+
TextContentPartSchemaTypedDict
|
|
58
|
+
)
|
|
54
59
|
|
|
55
60
|
|
|
56
|
-
|
|
61
|
+
CreateChatCompletionContentRouterChatCompletionsRequest2 = TextContentPartSchema
|
|
57
62
|
|
|
58
63
|
|
|
59
|
-
|
|
60
|
-
"
|
|
61
|
-
Union[str, List[
|
|
64
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5ContentTypedDict = TypeAliasType(
|
|
65
|
+
"CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5ContentTypedDict",
|
|
66
|
+
Union[str, List[CreateChatCompletionContentRouterChatCompletionsRequest2TypedDict]],
|
|
62
67
|
)
|
|
63
68
|
r"""The contents of the tool message."""
|
|
64
69
|
|
|
65
70
|
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
71
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Content = (
|
|
72
|
+
TypeAliasType(
|
|
73
|
+
"CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Content",
|
|
74
|
+
Union[str, List[CreateChatCompletionContentRouterChatCompletionsRequest2]],
|
|
75
|
+
)
|
|
69
76
|
)
|
|
70
77
|
r"""The contents of the tool message."""
|
|
71
78
|
|
|
72
79
|
|
|
73
|
-
|
|
80
|
+
CreateChatCompletionMessagesRouterChatCompletionsType = Literal["ephemeral",]
|
|
74
81
|
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
75
82
|
|
|
76
83
|
|
|
@@ -88,7 +95,7 @@ Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
|
88
95
|
|
|
89
96
|
|
|
90
97
|
class CreateChatCompletionMessagesCacheControlTypedDict(TypedDict):
|
|
91
|
-
type:
|
|
98
|
+
type: CreateChatCompletionMessagesRouterChatCompletionsType
|
|
92
99
|
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
93
100
|
ttl: NotRequired[CreateChatCompletionMessagesTTL]
|
|
94
101
|
r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
|
|
@@ -101,7 +108,7 @@ class CreateChatCompletionMessagesCacheControlTypedDict(TypedDict):
|
|
|
101
108
|
|
|
102
109
|
|
|
103
110
|
class CreateChatCompletionMessagesCacheControl(BaseModel):
|
|
104
|
-
type:
|
|
111
|
+
type: CreateChatCompletionMessagesRouterChatCompletionsType
|
|
105
112
|
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
106
113
|
|
|
107
114
|
ttl: Optional[CreateChatCompletionMessagesTTL] = "5m"
|
|
@@ -113,11 +120,27 @@ class CreateChatCompletionMessagesCacheControl(BaseModel):
|
|
|
113
120
|
Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
114
121
|
"""
|
|
115
122
|
|
|
123
|
+
@model_serializer(mode="wrap")
|
|
124
|
+
def serialize_model(self, handler):
|
|
125
|
+
optional_fields = set(["ttl"])
|
|
126
|
+
serialized = handler(self)
|
|
127
|
+
m = {}
|
|
128
|
+
|
|
129
|
+
for n, f in type(self).model_fields.items():
|
|
130
|
+
k = f.alias or n
|
|
131
|
+
val = serialized.get(k)
|
|
132
|
+
|
|
133
|
+
if val != UNSET_SENTINEL:
|
|
134
|
+
if val is not None or k not in optional_fields:
|
|
135
|
+
m[k] = val
|
|
136
|
+
|
|
137
|
+
return m
|
|
138
|
+
|
|
116
139
|
|
|
117
140
|
class CreateChatCompletionMessagesToolMessageTypedDict(TypedDict):
|
|
118
|
-
role:
|
|
141
|
+
role: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Role
|
|
119
142
|
r"""The role of the messages author, in this case tool."""
|
|
120
|
-
content:
|
|
143
|
+
content: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5ContentTypedDict
|
|
121
144
|
r"""The contents of the tool message."""
|
|
122
145
|
tool_call_id: Nullable[str]
|
|
123
146
|
r"""Tool call that this message is responding to."""
|
|
@@ -125,10 +148,10 @@ class CreateChatCompletionMessagesToolMessageTypedDict(TypedDict):
|
|
|
125
148
|
|
|
126
149
|
|
|
127
150
|
class CreateChatCompletionMessagesToolMessage(BaseModel):
|
|
128
|
-
role:
|
|
151
|
+
role: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Role
|
|
129
152
|
r"""The role of the messages author, in this case tool."""
|
|
130
153
|
|
|
131
|
-
content:
|
|
154
|
+
content: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBody5Content
|
|
132
155
|
r"""The contents of the tool message."""
|
|
133
156
|
|
|
134
157
|
tool_call_id: Nullable[str]
|
|
@@ -138,37 +161,32 @@ class CreateChatCompletionMessagesToolMessage(BaseModel):
|
|
|
138
161
|
|
|
139
162
|
@model_serializer(mode="wrap")
|
|
140
163
|
def serialize_model(self, handler):
|
|
141
|
-
optional_fields = ["cache_control"]
|
|
142
|
-
nullable_fields = ["tool_call_id"]
|
|
143
|
-
null_default_fields = []
|
|
144
|
-
|
|
164
|
+
optional_fields = set(["cache_control"])
|
|
165
|
+
nullable_fields = set(["tool_call_id"])
|
|
145
166
|
serialized = handler(self)
|
|
146
|
-
|
|
147
167
|
m = {}
|
|
148
168
|
|
|
149
169
|
for n, f in type(self).model_fields.items():
|
|
150
170
|
k = f.alias or n
|
|
151
171
|
val = serialized.get(k)
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
):
|
|
165
|
-
m[k] = val
|
|
172
|
+
is_nullable_and_explicitly_set = (
|
|
173
|
+
k in nullable_fields
|
|
174
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
175
|
+
)
|
|
176
|
+
|
|
177
|
+
if val != UNSET_SENTINEL:
|
|
178
|
+
if (
|
|
179
|
+
val is not None
|
|
180
|
+
or k not in optional_fields
|
|
181
|
+
or is_nullable_and_explicitly_set
|
|
182
|
+
):
|
|
183
|
+
m[k] = val
|
|
166
184
|
|
|
167
185
|
return m
|
|
168
186
|
|
|
169
187
|
|
|
170
|
-
|
|
171
|
-
"
|
|
188
|
+
CreateChatCompletionContentRouterChatCompletions2TypedDict = TypeAliasType(
|
|
189
|
+
"CreateChatCompletionContentRouterChatCompletions2TypedDict",
|
|
172
190
|
Union[
|
|
173
191
|
RefusalPartSchemaTypedDict,
|
|
174
192
|
RedactedReasoningPartSchemaTypedDict,
|
|
@@ -178,7 +196,7 @@ CreateChatCompletionContentRouter2TypedDict = TypeAliasType(
|
|
|
178
196
|
)
|
|
179
197
|
|
|
180
198
|
|
|
181
|
-
|
|
199
|
+
CreateChatCompletionContentRouterChatCompletions2 = Annotated[
|
|
182
200
|
Union[
|
|
183
201
|
Annotated[TextContentPartSchema, Tag("text")],
|
|
184
202
|
Annotated[RefusalPartSchema, Tag("refusal")],
|
|
@@ -189,21 +207,25 @@ CreateChatCompletionContentRouter2 = Annotated[
|
|
|
189
207
|
]
|
|
190
208
|
|
|
191
209
|
|
|
192
|
-
|
|
193
|
-
"
|
|
194
|
-
Union[str, List[
|
|
210
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContentTypedDict = TypeAliasType(
|
|
211
|
+
"CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContentTypedDict",
|
|
212
|
+
Union[str, List[CreateChatCompletionContentRouterChatCompletions2TypedDict]],
|
|
195
213
|
)
|
|
196
214
|
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
197
215
|
|
|
198
216
|
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
217
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContent = (
|
|
218
|
+
TypeAliasType(
|
|
219
|
+
"CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContent",
|
|
220
|
+
Union[str, List[CreateChatCompletionContentRouterChatCompletions2]],
|
|
221
|
+
)
|
|
202
222
|
)
|
|
203
223
|
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
204
224
|
|
|
205
225
|
|
|
206
|
-
|
|
226
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyRole = Literal[
|
|
227
|
+
"assistant",
|
|
228
|
+
]
|
|
207
229
|
r"""The role of the messages author, in this case `assistant`."""
|
|
208
230
|
|
|
209
231
|
|
|
@@ -239,6 +261,22 @@ class CreateChatCompletionMessagesFunction(BaseModel):
|
|
|
239
261
|
arguments: Optional[str] = None
|
|
240
262
|
r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
|
|
241
263
|
|
|
264
|
+
@model_serializer(mode="wrap")
|
|
265
|
+
def serialize_model(self, handler):
|
|
266
|
+
optional_fields = set(["name", "arguments"])
|
|
267
|
+
serialized = handler(self)
|
|
268
|
+
m = {}
|
|
269
|
+
|
|
270
|
+
for n, f in type(self).model_fields.items():
|
|
271
|
+
k = f.alias or n
|
|
272
|
+
val = serialized.get(k)
|
|
273
|
+
|
|
274
|
+
if val != UNSET_SENTINEL:
|
|
275
|
+
if val is not None or k not in optional_fields:
|
|
276
|
+
m[k] = val
|
|
277
|
+
|
|
278
|
+
return m
|
|
279
|
+
|
|
242
280
|
|
|
243
281
|
class CreateChatCompletionMessagesToolCallsTypedDict(TypedDict):
|
|
244
282
|
id: str
|
|
@@ -262,12 +300,30 @@ class CreateChatCompletionMessagesToolCalls(BaseModel):
|
|
|
262
300
|
thought_signature: Optional[str] = None
|
|
263
301
|
r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
|
|
264
302
|
|
|
303
|
+
@model_serializer(mode="wrap")
|
|
304
|
+
def serialize_model(self, handler):
|
|
305
|
+
optional_fields = set(["thought_signature"])
|
|
306
|
+
serialized = handler(self)
|
|
307
|
+
m = {}
|
|
308
|
+
|
|
309
|
+
for n, f in type(self).model_fields.items():
|
|
310
|
+
k = f.alias or n
|
|
311
|
+
val = serialized.get(k)
|
|
312
|
+
|
|
313
|
+
if val != UNSET_SENTINEL:
|
|
314
|
+
if val is not None or k not in optional_fields:
|
|
315
|
+
m[k] = val
|
|
316
|
+
|
|
317
|
+
return m
|
|
318
|
+
|
|
265
319
|
|
|
266
320
|
class CreateChatCompletionMessagesAssistantMessageTypedDict(TypedDict):
|
|
267
|
-
role:
|
|
321
|
+
role: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyRole
|
|
268
322
|
r"""The role of the messages author, in this case `assistant`."""
|
|
269
323
|
content: NotRequired[
|
|
270
|
-
Nullable[
|
|
324
|
+
Nullable[
|
|
325
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContentTypedDict
|
|
326
|
+
]
|
|
271
327
|
]
|
|
272
328
|
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
273
329
|
refusal: NotRequired[Nullable[str]]
|
|
@@ -281,11 +337,11 @@ class CreateChatCompletionMessagesAssistantMessageTypedDict(TypedDict):
|
|
|
281
337
|
|
|
282
338
|
|
|
283
339
|
class CreateChatCompletionMessagesAssistantMessage(BaseModel):
|
|
284
|
-
role:
|
|
340
|
+
role: CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyRole
|
|
285
341
|
r"""The role of the messages author, in this case `assistant`."""
|
|
286
342
|
|
|
287
343
|
content: OptionalNullable[
|
|
288
|
-
|
|
344
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRequestBodyContent
|
|
289
345
|
] = UNSET
|
|
290
346
|
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
291
347
|
|
|
@@ -303,36 +359,31 @@ class CreateChatCompletionMessagesAssistantMessage(BaseModel):
|
|
|
303
359
|
|
|
304
360
|
@model_serializer(mode="wrap")
|
|
305
361
|
def serialize_model(self, handler):
|
|
306
|
-
optional_fields = ["content", "refusal", "name", "audio", "tool_calls"]
|
|
307
|
-
nullable_fields = ["content", "refusal", "audio"]
|
|
308
|
-
null_default_fields = []
|
|
309
|
-
|
|
362
|
+
optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
|
|
363
|
+
nullable_fields = set(["content", "refusal", "audio"])
|
|
310
364
|
serialized = handler(self)
|
|
311
|
-
|
|
312
365
|
m = {}
|
|
313
366
|
|
|
314
367
|
for n, f in type(self).model_fields.items():
|
|
315
368
|
k = f.alias or n
|
|
316
369
|
val = serialized.get(k)
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
):
|
|
330
|
-
m[k] = val
|
|
370
|
+
is_nullable_and_explicitly_set = (
|
|
371
|
+
k in nullable_fields
|
|
372
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
373
|
+
)
|
|
374
|
+
|
|
375
|
+
if val != UNSET_SENTINEL:
|
|
376
|
+
if (
|
|
377
|
+
val is not None
|
|
378
|
+
or k not in optional_fields
|
|
379
|
+
or is_nullable_and_explicitly_set
|
|
380
|
+
):
|
|
381
|
+
m[k] = val
|
|
331
382
|
|
|
332
383
|
return m
|
|
333
384
|
|
|
334
385
|
|
|
335
|
-
|
|
386
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestRole = Literal["user",]
|
|
336
387
|
r"""The role of the messages author, in this case `user`."""
|
|
337
388
|
|
|
338
389
|
|
|
@@ -340,7 +391,7 @@ CreateChatCompletion2Type = Literal["file",]
|
|
|
340
391
|
r"""The type of the content part. Always `file`."""
|
|
341
392
|
|
|
342
393
|
|
|
343
|
-
|
|
394
|
+
CreateChatCompletion2RouterChatCompletionsType = Literal["ephemeral",]
|
|
344
395
|
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
345
396
|
|
|
346
397
|
|
|
@@ -358,7 +409,7 @@ Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
|
358
409
|
|
|
359
410
|
|
|
360
411
|
class CreateChatCompletion2CacheControlTypedDict(TypedDict):
|
|
361
|
-
type:
|
|
412
|
+
type: CreateChatCompletion2RouterChatCompletionsType
|
|
362
413
|
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
363
414
|
ttl: NotRequired[CreateChatCompletion2TTL]
|
|
364
415
|
r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
|
|
@@ -371,7 +422,7 @@ class CreateChatCompletion2CacheControlTypedDict(TypedDict):
|
|
|
371
422
|
|
|
372
423
|
|
|
373
424
|
class CreateChatCompletion2CacheControl(BaseModel):
|
|
374
|
-
type:
|
|
425
|
+
type: CreateChatCompletion2RouterChatCompletionsType
|
|
375
426
|
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
376
427
|
|
|
377
428
|
ttl: Optional[CreateChatCompletion2TTL] = "5m"
|
|
@@ -383,6 +434,22 @@ class CreateChatCompletion2CacheControl(BaseModel):
|
|
|
383
434
|
Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
384
435
|
"""
|
|
385
436
|
|
|
437
|
+
@model_serializer(mode="wrap")
|
|
438
|
+
def serialize_model(self, handler):
|
|
439
|
+
optional_fields = set(["ttl"])
|
|
440
|
+
serialized = handler(self)
|
|
441
|
+
m = {}
|
|
442
|
+
|
|
443
|
+
for n, f in type(self).model_fields.items():
|
|
444
|
+
k = f.alias or n
|
|
445
|
+
val = serialized.get(k)
|
|
446
|
+
|
|
447
|
+
if val != UNSET_SENTINEL:
|
|
448
|
+
if val is not None or k not in optional_fields:
|
|
449
|
+
m[k] = val
|
|
450
|
+
|
|
451
|
+
return m
|
|
452
|
+
|
|
386
453
|
|
|
387
454
|
class CreateChatCompletion24TypedDict(TypedDict):
|
|
388
455
|
type: CreateChatCompletion2Type
|
|
@@ -401,6 +468,22 @@ class CreateChatCompletion24(BaseModel):
|
|
|
401
468
|
|
|
402
469
|
cache_control: Optional[CreateChatCompletion2CacheControl] = None
|
|
403
470
|
|
|
471
|
+
@model_serializer(mode="wrap")
|
|
472
|
+
def serialize_model(self, handler):
|
|
473
|
+
optional_fields = set(["cache_control"])
|
|
474
|
+
serialized = handler(self)
|
|
475
|
+
m = {}
|
|
476
|
+
|
|
477
|
+
for n, f in type(self).model_fields.items():
|
|
478
|
+
k = f.alias or n
|
|
479
|
+
val = serialized.get(k)
|
|
480
|
+
|
|
481
|
+
if val != UNSET_SENTINEL:
|
|
482
|
+
if val is not None or k not in optional_fields:
|
|
483
|
+
m[k] = val
|
|
484
|
+
|
|
485
|
+
return m
|
|
486
|
+
|
|
404
487
|
|
|
405
488
|
CreateChatCompletionContent2TypedDict = TypeAliasType(
|
|
406
489
|
"CreateChatCompletionContent2TypedDict",
|
|
@@ -424,76 +507,111 @@ CreateChatCompletionContent2 = Annotated[
|
|
|
424
507
|
]
|
|
425
508
|
|
|
426
509
|
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
510
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestContentTypedDict = (
|
|
511
|
+
TypeAliasType(
|
|
512
|
+
"CreateChatCompletionMessagesRouterChatCompletionsRequestContentTypedDict",
|
|
513
|
+
Union[str, List[CreateChatCompletionContent2TypedDict]],
|
|
514
|
+
)
|
|
430
515
|
)
|
|
431
516
|
r"""The contents of the user message."""
|
|
432
517
|
|
|
433
518
|
|
|
434
|
-
|
|
435
|
-
"
|
|
519
|
+
CreateChatCompletionMessagesRouterChatCompletionsRequestContent = TypeAliasType(
|
|
520
|
+
"CreateChatCompletionMessagesRouterChatCompletionsRequestContent",
|
|
436
521
|
Union[str, List[CreateChatCompletionContent2]],
|
|
437
522
|
)
|
|
438
523
|
r"""The contents of the user message."""
|
|
439
524
|
|
|
440
525
|
|
|
441
526
|
class CreateChatCompletionMessagesUserMessageTypedDict(TypedDict):
|
|
442
|
-
role:
|
|
527
|
+
role: CreateChatCompletionMessagesRouterChatCompletionsRequestRole
|
|
443
528
|
r"""The role of the messages author, in this case `user`."""
|
|
444
|
-
content:
|
|
529
|
+
content: CreateChatCompletionMessagesRouterChatCompletionsRequestContentTypedDict
|
|
445
530
|
r"""The contents of the user message."""
|
|
446
531
|
name: NotRequired[str]
|
|
447
532
|
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
448
533
|
|
|
449
534
|
|
|
450
535
|
class CreateChatCompletionMessagesUserMessage(BaseModel):
|
|
451
|
-
role:
|
|
536
|
+
role: CreateChatCompletionMessagesRouterChatCompletionsRequestRole
|
|
452
537
|
r"""The role of the messages author, in this case `user`."""
|
|
453
538
|
|
|
454
|
-
content:
|
|
539
|
+
content: CreateChatCompletionMessagesRouterChatCompletionsRequestContent
|
|
455
540
|
r"""The contents of the user message."""
|
|
456
541
|
|
|
457
542
|
name: Optional[str] = None
|
|
458
543
|
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
459
544
|
|
|
545
|
+
@model_serializer(mode="wrap")
|
|
546
|
+
def serialize_model(self, handler):
|
|
547
|
+
optional_fields = set(["name"])
|
|
548
|
+
serialized = handler(self)
|
|
549
|
+
m = {}
|
|
460
550
|
|
|
461
|
-
|
|
551
|
+
for n, f in type(self).model_fields.items():
|
|
552
|
+
k = f.alias or n
|
|
553
|
+
val = serialized.get(k)
|
|
554
|
+
|
|
555
|
+
if val != UNSET_SENTINEL:
|
|
556
|
+
if val is not None or k not in optional_fields:
|
|
557
|
+
m[k] = val
|
|
558
|
+
|
|
559
|
+
return m
|
|
560
|
+
|
|
561
|
+
|
|
562
|
+
CreateChatCompletionMessagesRouterChatCompletionsRole = Literal["developer",]
|
|
462
563
|
r"""The role of the messages author, in this case `developer`."""
|
|
463
564
|
|
|
464
565
|
|
|
465
|
-
|
|
466
|
-
"
|
|
566
|
+
CreateChatCompletionMessagesRouterChatCompletionsContentTypedDict = TypeAliasType(
|
|
567
|
+
"CreateChatCompletionMessagesRouterChatCompletionsContentTypedDict",
|
|
467
568
|
Union[str, List[TextContentPartSchemaTypedDict]],
|
|
468
569
|
)
|
|
469
570
|
r"""The contents of the developer message."""
|
|
470
571
|
|
|
471
572
|
|
|
472
|
-
|
|
473
|
-
"
|
|
573
|
+
CreateChatCompletionMessagesRouterChatCompletionsContent = TypeAliasType(
|
|
574
|
+
"CreateChatCompletionMessagesRouterChatCompletionsContent",
|
|
575
|
+
Union[str, List[TextContentPartSchema]],
|
|
474
576
|
)
|
|
475
577
|
r"""The contents of the developer message."""
|
|
476
578
|
|
|
477
579
|
|
|
478
580
|
class CreateChatCompletionMessagesDeveloperMessageTypedDict(TypedDict):
|
|
479
|
-
role:
|
|
581
|
+
role: CreateChatCompletionMessagesRouterChatCompletionsRole
|
|
480
582
|
r"""The role of the messages author, in this case `developer`."""
|
|
481
|
-
content:
|
|
583
|
+
content: CreateChatCompletionMessagesRouterChatCompletionsContentTypedDict
|
|
482
584
|
r"""The contents of the developer message."""
|
|
483
585
|
name: NotRequired[str]
|
|
484
586
|
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
485
587
|
|
|
486
588
|
|
|
487
589
|
class CreateChatCompletionMessagesDeveloperMessage(BaseModel):
|
|
488
|
-
role:
|
|
590
|
+
role: CreateChatCompletionMessagesRouterChatCompletionsRole
|
|
489
591
|
r"""The role of the messages author, in this case `developer`."""
|
|
490
592
|
|
|
491
|
-
content:
|
|
593
|
+
content: CreateChatCompletionMessagesRouterChatCompletionsContent
|
|
492
594
|
r"""The contents of the developer message."""
|
|
493
595
|
|
|
494
596
|
name: Optional[str] = None
|
|
495
597
|
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
496
598
|
|
|
599
|
+
@model_serializer(mode="wrap")
|
|
600
|
+
def serialize_model(self, handler):
|
|
601
|
+
optional_fields = set(["name"])
|
|
602
|
+
serialized = handler(self)
|
|
603
|
+
m = {}
|
|
604
|
+
|
|
605
|
+
for n, f in type(self).model_fields.items():
|
|
606
|
+
k = f.alias or n
|
|
607
|
+
val = serialized.get(k)
|
|
608
|
+
|
|
609
|
+
if val != UNSET_SENTINEL:
|
|
610
|
+
if val is not None or k not in optional_fields:
|
|
611
|
+
m[k] = val
|
|
612
|
+
|
|
613
|
+
return m
|
|
614
|
+
|
|
497
615
|
|
|
498
616
|
CreateChatCompletionMessagesRole = Literal["system",]
|
|
499
617
|
r"""The role of the messages author, in this case `system`."""
|
|
@@ -535,6 +653,22 @@ class CreateChatCompletionMessagesSystemMessage(BaseModel):
|
|
|
535
653
|
name: Optional[str] = None
|
|
536
654
|
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
537
655
|
|
|
656
|
+
@model_serializer(mode="wrap")
|
|
657
|
+
def serialize_model(self, handler):
|
|
658
|
+
optional_fields = set(["name"])
|
|
659
|
+
serialized = handler(self)
|
|
660
|
+
m = {}
|
|
661
|
+
|
|
662
|
+
for n, f in type(self).model_fields.items():
|
|
663
|
+
k = f.alias or n
|
|
664
|
+
val = serialized.get(k)
|
|
665
|
+
|
|
666
|
+
if val != UNSET_SENTINEL:
|
|
667
|
+
if val is not None or k not in optional_fields:
|
|
668
|
+
m[k] = val
|
|
669
|
+
|
|
670
|
+
return m
|
|
671
|
+
|
|
538
672
|
|
|
539
673
|
CreateChatCompletionMessagesTypedDict = TypeAliasType(
|
|
540
674
|
"CreateChatCompletionMessagesTypedDict",
|
|
@@ -600,10 +734,14 @@ class CreateChatCompletionAudio(BaseModel):
|
|
|
600
734
|
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
601
735
|
|
|
602
736
|
|
|
603
|
-
|
|
737
|
+
CreateChatCompletionResponseFormatRouterChatCompletionsRequestType = Literal[
|
|
738
|
+
"json_schema",
|
|
739
|
+
]
|
|
604
740
|
|
|
605
741
|
|
|
606
|
-
class
|
|
742
|
+
class CreateChatCompletionResponseFormatRouterChatCompletionsJSONSchemaTypedDict(
|
|
743
|
+
TypedDict
|
|
744
|
+
):
|
|
607
745
|
name: str
|
|
608
746
|
r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
|
|
609
747
|
description: NotRequired[str]
|
|
@@ -614,7 +752,7 @@ class CreateChatCompletionResponseFormatRouterJSONSchemaTypedDict(TypedDict):
|
|
|
614
752
|
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
615
753
|
|
|
616
754
|
|
|
617
|
-
class
|
|
755
|
+
class CreateChatCompletionResponseFormatRouterChatCompletionsJSONSchema(BaseModel):
|
|
618
756
|
name: str
|
|
619
757
|
r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
|
|
620
758
|
|
|
@@ -627,6 +765,22 @@ class CreateChatCompletionResponseFormatRouterJSONSchema(BaseModel):
|
|
|
627
765
|
strict: Optional[bool] = False
|
|
628
766
|
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
629
767
|
|
|
768
|
+
@model_serializer(mode="wrap")
|
|
769
|
+
def serialize_model(self, handler):
|
|
770
|
+
optional_fields = set(["description", "schema", "strict"])
|
|
771
|
+
serialized = handler(self)
|
|
772
|
+
m = {}
|
|
773
|
+
|
|
774
|
+
for n, f in type(self).model_fields.items():
|
|
775
|
+
k = f.alias or n
|
|
776
|
+
val = serialized.get(k)
|
|
777
|
+
|
|
778
|
+
if val != UNSET_SENTINEL:
|
|
779
|
+
if val is not None or k not in optional_fields:
|
|
780
|
+
m[k] = val
|
|
781
|
+
|
|
782
|
+
return m
|
|
783
|
+
|
|
630
784
|
|
|
631
785
|
class CreateChatCompletionResponseFormatJSONSchemaTypedDict(TypedDict):
|
|
632
786
|
r"""
|
|
@@ -634,8 +788,10 @@ class CreateChatCompletionResponseFormatJSONSchemaTypedDict(TypedDict):
|
|
|
634
788
|
JSON Schema response format. Used to generate structured JSON responses
|
|
635
789
|
"""
|
|
636
790
|
|
|
637
|
-
type:
|
|
638
|
-
json_schema:
|
|
791
|
+
type: CreateChatCompletionResponseFormatRouterChatCompletionsRequestType
|
|
792
|
+
json_schema: (
|
|
793
|
+
CreateChatCompletionResponseFormatRouterChatCompletionsJSONSchemaTypedDict
|
|
794
|
+
)
|
|
639
795
|
|
|
640
796
|
|
|
641
797
|
class CreateChatCompletionResponseFormatJSONSchema(BaseModel):
|
|
@@ -644,12 +800,12 @@ class CreateChatCompletionResponseFormatJSONSchema(BaseModel):
|
|
|
644
800
|
JSON Schema response format. Used to generate structured JSON responses
|
|
645
801
|
"""
|
|
646
802
|
|
|
647
|
-
type:
|
|
803
|
+
type: CreateChatCompletionResponseFormatRouterChatCompletionsRequestType
|
|
648
804
|
|
|
649
|
-
json_schema:
|
|
805
|
+
json_schema: CreateChatCompletionResponseFormatRouterChatCompletionsJSONSchema
|
|
650
806
|
|
|
651
807
|
|
|
652
|
-
|
|
808
|
+
CreateChatCompletionResponseFormatRouterChatCompletionsType = Literal["json_object",]
|
|
653
809
|
|
|
654
810
|
|
|
655
811
|
class CreateChatCompletionResponseFormatJSONObjectTypedDict(TypedDict):
|
|
@@ -658,7 +814,7 @@ class CreateChatCompletionResponseFormatJSONObjectTypedDict(TypedDict):
|
|
|
658
814
|
JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
|
|
659
815
|
"""
|
|
660
816
|
|
|
661
|
-
type:
|
|
817
|
+
type: CreateChatCompletionResponseFormatRouterChatCompletionsType
|
|
662
818
|
|
|
663
819
|
|
|
664
820
|
class CreateChatCompletionResponseFormatJSONObject(BaseModel):
|
|
@@ -667,7 +823,7 @@ class CreateChatCompletionResponseFormatJSONObject(BaseModel):
|
|
|
667
823
|
JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
|
|
668
824
|
"""
|
|
669
825
|
|
|
670
|
-
type:
|
|
826
|
+
type: CreateChatCompletionResponseFormatRouterChatCompletionsType
|
|
671
827
|
|
|
672
828
|
|
|
673
829
|
CreateChatCompletionResponseFormatType = Literal["text",]
|
|
@@ -757,6 +913,22 @@ class CreateChatCompletionStreamOptions(BaseModel):
|
|
|
757
913
|
include_usage: Optional[bool] = None
|
|
758
914
|
r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
|
|
759
915
|
|
|
916
|
+
@model_serializer(mode="wrap")
|
|
917
|
+
def serialize_model(self, handler):
|
|
918
|
+
optional_fields = set(["include_usage"])
|
|
919
|
+
serialized = handler(self)
|
|
920
|
+
m = {}
|
|
921
|
+
|
|
922
|
+
for n, f in type(self).model_fields.items():
|
|
923
|
+
k = f.alias or n
|
|
924
|
+
val = serialized.get(k)
|
|
925
|
+
|
|
926
|
+
if val != UNSET_SENTINEL:
|
|
927
|
+
if val is not None or k not in optional_fields:
|
|
928
|
+
m[k] = val
|
|
929
|
+
|
|
930
|
+
return m
|
|
931
|
+
|
|
760
932
|
|
|
761
933
|
CreateChatCompletionThinkingTypedDict = TypeAliasType(
|
|
762
934
|
"CreateChatCompletionThinkingTypedDict",
|
|
@@ -777,13 +949,13 @@ CreateChatCompletionType = Literal["function",]
|
|
|
777
949
|
r"""The type of the tool. Currently, only function is supported."""
|
|
778
950
|
|
|
779
951
|
|
|
780
|
-
|
|
952
|
+
CreateChatCompletionRouterChatCompletionsType = Literal["object",]
|
|
781
953
|
|
|
782
954
|
|
|
783
955
|
class CreateChatCompletionParametersTypedDict(TypedDict):
|
|
784
956
|
r"""The parameters the functions accepts, described as a JSON Schema object"""
|
|
785
957
|
|
|
786
|
-
type:
|
|
958
|
+
type: CreateChatCompletionRouterChatCompletionsType
|
|
787
959
|
properties: Dict[str, Any]
|
|
788
960
|
required: NotRequired[List[str]]
|
|
789
961
|
additional_properties: NotRequired[bool]
|
|
@@ -792,7 +964,7 @@ class CreateChatCompletionParametersTypedDict(TypedDict):
|
|
|
792
964
|
class CreateChatCompletionParameters(BaseModel):
|
|
793
965
|
r"""The parameters the functions accepts, described as a JSON Schema object"""
|
|
794
966
|
|
|
795
|
-
type:
|
|
967
|
+
type: CreateChatCompletionRouterChatCompletionsType
|
|
796
968
|
|
|
797
969
|
properties: Dict[str, Any]
|
|
798
970
|
|
|
@@ -802,6 +974,22 @@ class CreateChatCompletionParameters(BaseModel):
|
|
|
802
974
|
Optional[bool], pydantic.Field(alias="additionalProperties")
|
|
803
975
|
] = None
|
|
804
976
|
|
|
977
|
+
@model_serializer(mode="wrap")
|
|
978
|
+
def serialize_model(self, handler):
|
|
979
|
+
optional_fields = set(["required", "additionalProperties"])
|
|
980
|
+
serialized = handler(self)
|
|
981
|
+
m = {}
|
|
982
|
+
|
|
983
|
+
for n, f in type(self).model_fields.items():
|
|
984
|
+
k = f.alias or n
|
|
985
|
+
val = serialized.get(k)
|
|
986
|
+
|
|
987
|
+
if val != UNSET_SENTINEL:
|
|
988
|
+
if val is not None or k not in optional_fields:
|
|
989
|
+
m[k] = val
|
|
990
|
+
|
|
991
|
+
return m
|
|
992
|
+
|
|
805
993
|
|
|
806
994
|
class CreateChatCompletionFunctionTypedDict(TypedDict):
|
|
807
995
|
name: str
|
|
@@ -827,6 +1015,22 @@ class CreateChatCompletionFunction(BaseModel):
|
|
|
827
1015
|
strict: Optional[bool] = None
|
|
828
1016
|
r"""Whether to enable strict schema adherence when generating the function call."""
|
|
829
1017
|
|
|
1018
|
+
@model_serializer(mode="wrap")
|
|
1019
|
+
def serialize_model(self, handler):
|
|
1020
|
+
optional_fields = set(["description", "parameters", "strict"])
|
|
1021
|
+
serialized = handler(self)
|
|
1022
|
+
m = {}
|
|
1023
|
+
|
|
1024
|
+
for n, f in type(self).model_fields.items():
|
|
1025
|
+
k = f.alias or n
|
|
1026
|
+
val = serialized.get(k)
|
|
1027
|
+
|
|
1028
|
+
if val != UNSET_SENTINEL:
|
|
1029
|
+
if val is not None or k not in optional_fields:
|
|
1030
|
+
m[k] = val
|
|
1031
|
+
|
|
1032
|
+
return m
|
|
1033
|
+
|
|
830
1034
|
|
|
831
1035
|
class CreateChatCompletionToolsTypedDict(TypedDict):
|
|
832
1036
|
function: CreateChatCompletionFunctionTypedDict
|
|
@@ -840,6 +1044,22 @@ class CreateChatCompletionTools(BaseModel):
|
|
|
840
1044
|
type: Optional[CreateChatCompletionType] = None
|
|
841
1045
|
r"""The type of the tool. Currently, only function is supported."""
|
|
842
1046
|
|
|
1047
|
+
@model_serializer(mode="wrap")
|
|
1048
|
+
def serialize_model(self, handler):
|
|
1049
|
+
optional_fields = set(["type"])
|
|
1050
|
+
serialized = handler(self)
|
|
1051
|
+
m = {}
|
|
1052
|
+
|
|
1053
|
+
for n, f in type(self).model_fields.items():
|
|
1054
|
+
k = f.alias or n
|
|
1055
|
+
val = serialized.get(k)
|
|
1056
|
+
|
|
1057
|
+
if val != UNSET_SENTINEL:
|
|
1058
|
+
if val is not None or k not in optional_fields:
|
|
1059
|
+
m[k] = val
|
|
1060
|
+
|
|
1061
|
+
return m
|
|
1062
|
+
|
|
843
1063
|
|
|
844
1064
|
CreateChatCompletionToolChoiceType = Literal["function",]
|
|
845
1065
|
r"""The type of the tool. Currently, only function is supported."""
|
|
@@ -867,6 +1087,22 @@ class CreateChatCompletionToolChoice2(BaseModel):
|
|
|
867
1087
|
type: Optional[CreateChatCompletionToolChoiceType] = None
|
|
868
1088
|
r"""The type of the tool. Currently, only function is supported."""
|
|
869
1089
|
|
|
1090
|
+
@model_serializer(mode="wrap")
|
|
1091
|
+
def serialize_model(self, handler):
|
|
1092
|
+
optional_fields = set(["type"])
|
|
1093
|
+
serialized = handler(self)
|
|
1094
|
+
m = {}
|
|
1095
|
+
|
|
1096
|
+
for n, f in type(self).model_fields.items():
|
|
1097
|
+
k = f.alias or n
|
|
1098
|
+
val = serialized.get(k)
|
|
1099
|
+
|
|
1100
|
+
if val != UNSET_SENTINEL:
|
|
1101
|
+
if val is not None or k not in optional_fields:
|
|
1102
|
+
m[k] = val
|
|
1103
|
+
|
|
1104
|
+
return m
|
|
1105
|
+
|
|
870
1106
|
|
|
871
1107
|
CreateChatCompletionToolChoice1 = Literal[
|
|
872
1108
|
"none",
|
|
@@ -933,6 +1169,16 @@ class CreateChatCompletionGuardrails(BaseModel):
|
|
|
933
1169
|
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
934
1170
|
|
|
935
1171
|
|
|
1172
|
+
class CreateChatCompletionFallbacksTypedDict(TypedDict):
|
|
1173
|
+
model: str
|
|
1174
|
+
r"""Fallback model identifier"""
|
|
1175
|
+
|
|
1176
|
+
|
|
1177
|
+
class CreateChatCompletionFallbacks(BaseModel):
|
|
1178
|
+
model: str
|
|
1179
|
+
r"""Fallback model identifier"""
|
|
1180
|
+
|
|
1181
|
+
|
|
936
1182
|
class CreateChatCompletionRetryTypedDict(TypedDict):
|
|
937
1183
|
r"""Retry configuration for the request"""
|
|
938
1184
|
|
|
@@ -951,13 +1197,167 @@ class CreateChatCompletionRetry(BaseModel):
|
|
|
951
1197
|
on_codes: Optional[List[float]] = None
|
|
952
1198
|
r"""HTTP status codes that trigger retry logic"""
|
|
953
1199
|
|
|
1200
|
+
@model_serializer(mode="wrap")
|
|
1201
|
+
def serialize_model(self, handler):
|
|
1202
|
+
optional_fields = set(["count", "on_codes"])
|
|
1203
|
+
serialized = handler(self)
|
|
1204
|
+
m = {}
|
|
1205
|
+
|
|
1206
|
+
for n, f in type(self).model_fields.items():
|
|
1207
|
+
k = f.alias or n
|
|
1208
|
+
val = serialized.get(k)
|
|
1209
|
+
|
|
1210
|
+
if val != UNSET_SENTINEL:
|
|
1211
|
+
if val is not None or k not in optional_fields:
|
|
1212
|
+
m[k] = val
|
|
1213
|
+
|
|
1214
|
+
return m
|
|
1215
|
+
|
|
1216
|
+
|
|
1217
|
+
CreateChatCompletionRouterChatCompletionsRequestType = Literal["exact_match",]
|
|
1218
|
+
|
|
1219
|
+
|
|
1220
|
+
class CreateChatCompletionCacheTypedDict(TypedDict):
|
|
1221
|
+
r"""Cache configuration for the request."""
|
|
1222
|
+
|
|
1223
|
+
type: CreateChatCompletionRouterChatCompletionsRequestType
|
|
1224
|
+
ttl: NotRequired[float]
|
|
1225
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1226
|
+
|
|
1227
|
+
|
|
1228
|
+
class CreateChatCompletionCache(BaseModel):
|
|
1229
|
+
r"""Cache configuration for the request."""
|
|
1230
|
+
|
|
1231
|
+
type: CreateChatCompletionRouterChatCompletionsRequestType
|
|
1232
|
+
|
|
1233
|
+
ttl: Optional[float] = 1800
|
|
1234
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1235
|
+
|
|
1236
|
+
@model_serializer(mode="wrap")
|
|
1237
|
+
def serialize_model(self, handler):
|
|
1238
|
+
optional_fields = set(["ttl"])
|
|
1239
|
+
serialized = handler(self)
|
|
1240
|
+
m = {}
|
|
1241
|
+
|
|
1242
|
+
for n, f in type(self).model_fields.items():
|
|
1243
|
+
k = f.alias or n
|
|
1244
|
+
val = serialized.get(k)
|
|
1245
|
+
|
|
1246
|
+
if val != UNSET_SENTINEL:
|
|
1247
|
+
if val is not None or k not in optional_fields:
|
|
1248
|
+
m[k] = val
|
|
1249
|
+
|
|
1250
|
+
return m
|
|
1251
|
+
|
|
1252
|
+
|
|
1253
|
+
CreateChatCompletionLoadBalancerType = Literal["weight_based",]
|
|
1254
|
+
|
|
1255
|
+
|
|
1256
|
+
class CreateChatCompletionLoadBalancerModelsTypedDict(TypedDict):
|
|
1257
|
+
model: str
|
|
1258
|
+
r"""Model identifier for load balancing"""
|
|
1259
|
+
weight: NotRequired[float]
|
|
1260
|
+
r"""Weight assigned to this model for load balancing"""
|
|
1261
|
+
|
|
1262
|
+
|
|
1263
|
+
class CreateChatCompletionLoadBalancerModels(BaseModel):
|
|
1264
|
+
model: str
|
|
1265
|
+
r"""Model identifier for load balancing"""
|
|
1266
|
+
|
|
1267
|
+
weight: Optional[float] = 0.5
|
|
1268
|
+
r"""Weight assigned to this model for load balancing"""
|
|
1269
|
+
|
|
1270
|
+
@model_serializer(mode="wrap")
|
|
1271
|
+
def serialize_model(self, handler):
|
|
1272
|
+
optional_fields = set(["weight"])
|
|
1273
|
+
serialized = handler(self)
|
|
1274
|
+
m = {}
|
|
1275
|
+
|
|
1276
|
+
for n, f in type(self).model_fields.items():
|
|
1277
|
+
k = f.alias or n
|
|
1278
|
+
val = serialized.get(k)
|
|
1279
|
+
|
|
1280
|
+
if val != UNSET_SENTINEL:
|
|
1281
|
+
if val is not None or k not in optional_fields:
|
|
1282
|
+
m[k] = val
|
|
1283
|
+
|
|
1284
|
+
return m
|
|
1285
|
+
|
|
1286
|
+
|
|
1287
|
+
class CreateChatCompletionLoadBalancer1TypedDict(TypedDict):
|
|
1288
|
+
type: CreateChatCompletionLoadBalancerType
|
|
1289
|
+
models: List[CreateChatCompletionLoadBalancerModelsTypedDict]
|
|
1290
|
+
|
|
1291
|
+
|
|
1292
|
+
class CreateChatCompletionLoadBalancer1(BaseModel):
|
|
1293
|
+
type: CreateChatCompletionLoadBalancerType
|
|
1294
|
+
|
|
1295
|
+
models: List[CreateChatCompletionLoadBalancerModels]
|
|
1296
|
+
|
|
1297
|
+
|
|
1298
|
+
CreateChatCompletionLoadBalancerTypedDict = CreateChatCompletionLoadBalancer1TypedDict
|
|
1299
|
+
r"""Load balancer configuration for the request."""
|
|
1300
|
+
|
|
1301
|
+
|
|
1302
|
+
CreateChatCompletionLoadBalancer = CreateChatCompletionLoadBalancer1
|
|
1303
|
+
r"""Load balancer configuration for the request."""
|
|
1304
|
+
|
|
1305
|
+
|
|
1306
|
+
class CreateChatCompletionTimeoutTypedDict(TypedDict):
|
|
1307
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
1308
|
+
|
|
1309
|
+
call_timeout: float
|
|
1310
|
+
r"""Timeout value in milliseconds"""
|
|
1311
|
+
|
|
1312
|
+
|
|
1313
|
+
class CreateChatCompletionTimeout(BaseModel):
|
|
1314
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
1315
|
+
|
|
1316
|
+
call_timeout: float
|
|
1317
|
+
r"""Timeout value in milliseconds"""
|
|
1318
|
+
|
|
1319
|
+
|
|
1320
|
+
class CreateChatCompletionRouterChatCompletionsRetryTypedDict(TypedDict):
|
|
1321
|
+
r"""Retry configuration for the request"""
|
|
1322
|
+
|
|
1323
|
+
count: NotRequired[float]
|
|
1324
|
+
r"""Number of retry attempts (1-5)"""
|
|
1325
|
+
on_codes: NotRequired[List[float]]
|
|
1326
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
1327
|
+
|
|
1328
|
+
|
|
1329
|
+
class CreateChatCompletionRouterChatCompletionsRetry(BaseModel):
|
|
1330
|
+
r"""Retry configuration for the request"""
|
|
1331
|
+
|
|
1332
|
+
count: Optional[float] = 3
|
|
1333
|
+
r"""Number of retry attempts (1-5)"""
|
|
1334
|
+
|
|
1335
|
+
on_codes: Optional[List[float]] = None
|
|
1336
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
1337
|
+
|
|
1338
|
+
@model_serializer(mode="wrap")
|
|
1339
|
+
def serialize_model(self, handler):
|
|
1340
|
+
optional_fields = set(["count", "on_codes"])
|
|
1341
|
+
serialized = handler(self)
|
|
1342
|
+
m = {}
|
|
954
1343
|
|
|
955
|
-
|
|
1344
|
+
for n, f in type(self).model_fields.items():
|
|
1345
|
+
k = f.alias or n
|
|
1346
|
+
val = serialized.get(k)
|
|
1347
|
+
|
|
1348
|
+
if val != UNSET_SENTINEL:
|
|
1349
|
+
if val is not None or k not in optional_fields:
|
|
1350
|
+
m[k] = val
|
|
1351
|
+
|
|
1352
|
+
return m
|
|
1353
|
+
|
|
1354
|
+
|
|
1355
|
+
class CreateChatCompletionRouterChatCompletionsFallbacksTypedDict(TypedDict):
|
|
956
1356
|
model: str
|
|
957
1357
|
r"""Fallback model identifier"""
|
|
958
1358
|
|
|
959
1359
|
|
|
960
|
-
class
|
|
1360
|
+
class CreateChatCompletionRouterChatCompletionsFallbacks(BaseModel):
|
|
961
1361
|
model: str
|
|
962
1362
|
r"""Fallback model identifier"""
|
|
963
1363
|
|
|
@@ -985,51 +1385,6 @@ class Prompt(BaseModel):
|
|
|
985
1385
|
r"""Version of the prompt to use (currently only \"latest\" supported)"""
|
|
986
1386
|
|
|
987
1387
|
|
|
988
|
-
@deprecated(
|
|
989
|
-
"warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
|
|
990
|
-
)
|
|
991
|
-
class CreateChatCompletionContactTypedDict(TypedDict):
|
|
992
|
-
r"""@deprecated Use identity instead. Information about the contact making the request."""
|
|
993
|
-
|
|
994
|
-
id: str
|
|
995
|
-
r"""Unique identifier for the contact"""
|
|
996
|
-
display_name: NotRequired[str]
|
|
997
|
-
r"""Display name of the contact"""
|
|
998
|
-
email: NotRequired[str]
|
|
999
|
-
r"""Email address of the contact"""
|
|
1000
|
-
metadata: NotRequired[List[Dict[str, Any]]]
|
|
1001
|
-
r"""A hash of key/value pairs containing any other data about the contact"""
|
|
1002
|
-
logo_url: NotRequired[str]
|
|
1003
|
-
r"""URL to the contact's avatar or logo"""
|
|
1004
|
-
tags: NotRequired[List[str]]
|
|
1005
|
-
r"""A list of tags associated with the contact"""
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
@deprecated(
|
|
1009
|
-
"warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
|
|
1010
|
-
)
|
|
1011
|
-
class CreateChatCompletionContact(BaseModel):
|
|
1012
|
-
r"""@deprecated Use identity instead. Information about the contact making the request."""
|
|
1013
|
-
|
|
1014
|
-
id: str
|
|
1015
|
-
r"""Unique identifier for the contact"""
|
|
1016
|
-
|
|
1017
|
-
display_name: Optional[str] = None
|
|
1018
|
-
r"""Display name of the contact"""
|
|
1019
|
-
|
|
1020
|
-
email: Optional[str] = None
|
|
1021
|
-
r"""Email address of the contact"""
|
|
1022
|
-
|
|
1023
|
-
metadata: Optional[List[Dict[str, Any]]] = None
|
|
1024
|
-
r"""A hash of key/value pairs containing any other data about the contact"""
|
|
1025
|
-
|
|
1026
|
-
logo_url: Optional[str] = None
|
|
1027
|
-
r"""URL to the contact's avatar or logo"""
|
|
1028
|
-
|
|
1029
|
-
tags: Optional[List[str]] = None
|
|
1030
|
-
r"""A list of tags associated with the contact"""
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
1388
|
class CreateChatCompletionThreadTypedDict(TypedDict):
|
|
1034
1389
|
r"""Thread information to group related requests"""
|
|
1035
1390
|
|
|
@@ -1048,6 +1403,22 @@ class CreateChatCompletionThread(BaseModel):
|
|
|
1048
1403
|
tags: Optional[List[str]] = None
|
|
1049
1404
|
r"""Optional tags to differentiate or categorize threads"""
|
|
1050
1405
|
|
|
1406
|
+
@model_serializer(mode="wrap")
|
|
1407
|
+
def serialize_model(self, handler):
|
|
1408
|
+
optional_fields = set(["tags"])
|
|
1409
|
+
serialized = handler(self)
|
|
1410
|
+
m = {}
|
|
1411
|
+
|
|
1412
|
+
for n, f in type(self).model_fields.items():
|
|
1413
|
+
k = f.alias or n
|
|
1414
|
+
val = serialized.get(k)
|
|
1415
|
+
|
|
1416
|
+
if val != UNSET_SENTINEL:
|
|
1417
|
+
if val is not None or k not in optional_fields:
|
|
1418
|
+
m[k] = val
|
|
1419
|
+
|
|
1420
|
+
return m
|
|
1421
|
+
|
|
1051
1422
|
|
|
1052
1423
|
class Inputs2TypedDict(TypedDict):
|
|
1053
1424
|
key: str
|
|
@@ -1062,6 +1433,22 @@ class Inputs2(BaseModel):
|
|
|
1062
1433
|
|
|
1063
1434
|
is_pii: Optional[bool] = None
|
|
1064
1435
|
|
|
1436
|
+
@model_serializer(mode="wrap")
|
|
1437
|
+
def serialize_model(self, handler):
|
|
1438
|
+
optional_fields = set(["value", "is_pii"])
|
|
1439
|
+
serialized = handler(self)
|
|
1440
|
+
m = {}
|
|
1441
|
+
|
|
1442
|
+
for n, f in type(self).model_fields.items():
|
|
1443
|
+
k = f.alias or n
|
|
1444
|
+
val = serialized.get(k)
|
|
1445
|
+
|
|
1446
|
+
if val != UNSET_SENTINEL:
|
|
1447
|
+
if val is not None or k not in optional_fields:
|
|
1448
|
+
m[k] = val
|
|
1449
|
+
|
|
1450
|
+
return m
|
|
1451
|
+
|
|
1065
1452
|
|
|
1066
1453
|
InputsTypedDict = TypeAliasType(
|
|
1067
1454
|
"InputsTypedDict", Union[Dict[str, Any], List[Inputs2TypedDict]]
|
|
@@ -1073,25 +1460,43 @@ Inputs = TypeAliasType("Inputs", Union[Dict[str, Any], List[Inputs2]])
|
|
|
1073
1460
|
r"""Values to replace in the prompt messages using {{variableName}} syntax"""
|
|
1074
1461
|
|
|
1075
1462
|
|
|
1076
|
-
|
|
1463
|
+
CreateChatCompletionRouterChatCompletionsRequestRequestBodyType = Literal[
|
|
1464
|
+
"exact_match",
|
|
1465
|
+
]
|
|
1077
1466
|
|
|
1078
1467
|
|
|
1079
|
-
class
|
|
1468
|
+
class CreateChatCompletionRouterChatCompletionsCacheTypedDict(TypedDict):
|
|
1080
1469
|
r"""Cache configuration for the request."""
|
|
1081
1470
|
|
|
1082
|
-
type:
|
|
1471
|
+
type: CreateChatCompletionRouterChatCompletionsRequestRequestBodyType
|
|
1083
1472
|
ttl: NotRequired[float]
|
|
1084
1473
|
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1085
1474
|
|
|
1086
1475
|
|
|
1087
|
-
class
|
|
1476
|
+
class CreateChatCompletionRouterChatCompletionsCache(BaseModel):
|
|
1088
1477
|
r"""Cache configuration for the request."""
|
|
1089
1478
|
|
|
1090
|
-
type:
|
|
1479
|
+
type: CreateChatCompletionRouterChatCompletionsRequestRequestBodyType
|
|
1091
1480
|
|
|
1092
1481
|
ttl: Optional[float] = 1800
|
|
1093
1482
|
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1094
1483
|
|
|
1484
|
+
@model_serializer(mode="wrap")
|
|
1485
|
+
def serialize_model(self, handler):
|
|
1486
|
+
optional_fields = set(["ttl"])
|
|
1487
|
+
serialized = handler(self)
|
|
1488
|
+
m = {}
|
|
1489
|
+
|
|
1490
|
+
for n, f in type(self).model_fields.items():
|
|
1491
|
+
k = f.alias or n
|
|
1492
|
+
val = serialized.get(k)
|
|
1493
|
+
|
|
1494
|
+
if val != UNSET_SENTINEL:
|
|
1495
|
+
if val is not None or k not in optional_fields:
|
|
1496
|
+
m[k] = val
|
|
1497
|
+
|
|
1498
|
+
return m
|
|
1499
|
+
|
|
1095
1500
|
|
|
1096
1501
|
CreateChatCompletionSearchType = Literal[
|
|
1097
1502
|
"vector_search",
|
|
@@ -1113,48 +1518,50 @@ class CreateChatCompletionOrExists(BaseModel):
     exists: bool


-
-"
+CreateChatCompletionOrRouterChatCompletionsNinTypedDict = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsNinTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletionOrRouterChatCompletionsNin = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsNin", Union[str, float, bool]
 )


 class CreateChatCompletionOrNinTypedDict(TypedDict):
     r"""Not in"""

-    nin: List[
+    nin: List[CreateChatCompletionOrRouterChatCompletionsNinTypedDict]


 class CreateChatCompletionOrNin(BaseModel):
     r"""Not in"""

-    nin: List[
+    nin: List[CreateChatCompletionOrRouterChatCompletionsNin]


-
-"
+CreateChatCompletionOrRouterChatCompletionsInTypedDict = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsInTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletionOrRouterChatCompletionsIn = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsIn", Union[str, float, bool]
 )


 class CreateChatCompletionOrInTypedDict(TypedDict):
     r"""In"""

-    in_: List[
+    in_: List[CreateChatCompletionOrRouterChatCompletionsInTypedDict]


 class CreateChatCompletionOrIn(BaseModel):
     r"""In"""

-    in_: Annotated[
+    in_: Annotated[
+        List[CreateChatCompletionOrRouterChatCompletionsIn], pydantic.Field(alias="in")
+    ]


 class CreateChatCompletionOrLteTypedDict(TypedDict):
@@ -1205,52 +1612,52 @@ class CreateChatCompletionOrGt(BaseModel):
     gt: float


-
-"
+CreateChatCompletionOrRouterChatCompletionsNeTypedDict = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsNeTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletionOrRouterChatCompletionsNe = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsNe", Union[str, float, bool]
 )


 class CreateChatCompletionOrNeTypedDict(TypedDict):
     r"""Not equal to"""

-    ne:
+    ne: CreateChatCompletionOrRouterChatCompletionsNeTypedDict


 class CreateChatCompletionOrNe(BaseModel):
     r"""Not equal to"""

-    ne:
+    ne: CreateChatCompletionOrRouterChatCompletionsNe


-
-"
+CreateChatCompletionOrRouterChatCompletionsEqTypedDict = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsEqTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletionOrRouterChatCompletionsEq = TypeAliasType(
+    "CreateChatCompletionOrRouterChatCompletionsEq", Union[str, float, bool]
 )


 class CreateChatCompletionOrEqTypedDict(TypedDict):
     r"""Equal to"""

-    eq:
+    eq: CreateChatCompletionOrRouterChatCompletionsEqTypedDict


 class CreateChatCompletionOrEq(BaseModel):
     r"""Equal to"""

-    eq:
+    eq: CreateChatCompletionOrRouterChatCompletionsEq


-
-"
+CreateChatCompletionFilterByRouterChatCompletionsOrTypedDict = TypeAliasType(
+    "CreateChatCompletionFilterByRouterChatCompletionsOrTypedDict",
     Union[
         CreateChatCompletionOrEqTypedDict,
         CreateChatCompletionOrNeTypedDict,
@@ -1265,8 +1672,8 @@ CreateChatCompletionFilterByRouterOrTypedDict = TypeAliasType(
 )


-
-"
+CreateChatCompletionFilterByRouterChatCompletionsOr = TypeAliasType(
+    "CreateChatCompletionFilterByRouterChatCompletionsOr",
     Union[
         CreateChatCompletionOrEq,
         CreateChatCompletionOrNe,
@@ -1284,14 +1691,14 @@ CreateChatCompletionFilterByRouterOr = TypeAliasType(
 class CreateChatCompletionFilterByOrTypedDict(TypedDict):
     r"""Or"""

-    or_: List[Dict[str,
+    or_: List[Dict[str, CreateChatCompletionFilterByRouterChatCompletionsOrTypedDict]]


 class CreateChatCompletionFilterByOr(BaseModel):
     r"""Or"""

     or_: Annotated[
-        List[Dict[str,
+        List[Dict[str, CreateChatCompletionFilterByRouterChatCompletionsOr]],
         pydantic.Field(alias="or"),
     ]

@@ -1308,48 +1715,50 @@ class CreateChatCompletionAndExists(BaseModel):
     exists: bool


-
-"
+CreateChatCompletionAndRouterChatCompletionsNinTypedDict = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsNinTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletionAndRouterChatCompletionsNin = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsNin", Union[str, float, bool]
 )


 class CreateChatCompletionAndNinTypedDict(TypedDict):
     r"""Not in"""

-    nin: List[
+    nin: List[CreateChatCompletionAndRouterChatCompletionsNinTypedDict]


 class CreateChatCompletionAndNin(BaseModel):
     r"""Not in"""

-    nin: List[
+    nin: List[CreateChatCompletionAndRouterChatCompletionsNin]


-
-"
+CreateChatCompletionAndRouterChatCompletionsInTypedDict = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsInTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletionAndRouterChatCompletionsIn = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsIn", Union[str, float, bool]
 )


 class CreateChatCompletionAndInTypedDict(TypedDict):
     r"""In"""

-    in_: List[
+    in_: List[CreateChatCompletionAndRouterChatCompletionsInTypedDict]


 class CreateChatCompletionAndIn(BaseModel):
     r"""In"""

-    in_: Annotated[
+    in_: Annotated[
+        List[CreateChatCompletionAndRouterChatCompletionsIn], pydantic.Field(alias="in")
+    ]


 class CreateChatCompletionAndLteTypedDict(TypedDict):
@@ -1400,52 +1809,52 @@ class CreateChatCompletionAndGt(BaseModel):
     gt: float


-
-"
+CreateChatCompletionAndRouterChatCompletionsNeTypedDict = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsNeTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletionAndRouterChatCompletionsNe = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsNe", Union[str, float, bool]
 )


 class CreateChatCompletionAndNeTypedDict(TypedDict):
     r"""Not equal to"""

-    ne:
+    ne: CreateChatCompletionAndRouterChatCompletionsNeTypedDict


 class CreateChatCompletionAndNe(BaseModel):
     r"""Not equal to"""

-    ne:
+    ne: CreateChatCompletionAndRouterChatCompletionsNe


-
-"
+CreateChatCompletionAndRouterChatCompletionsEqTypedDict = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsEqTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletionAndRouterChatCompletionsEq = TypeAliasType(
+    "CreateChatCompletionAndRouterChatCompletionsEq", Union[str, float, bool]
 )


 class CreateChatCompletionAndEqTypedDict(TypedDict):
     r"""Equal to"""

-    eq:
+    eq: CreateChatCompletionAndRouterChatCompletionsEqTypedDict


 class CreateChatCompletionAndEq(BaseModel):
     r"""Equal to"""

-    eq:
+    eq: CreateChatCompletionAndRouterChatCompletionsEq


-
-"
+CreateChatCompletionFilterByRouterChatCompletionsAndTypedDict = TypeAliasType(
+    "CreateChatCompletionFilterByRouterChatCompletionsAndTypedDict",
     Union[
         CreateChatCompletionAndEqTypedDict,
         CreateChatCompletionAndNeTypedDict,
@@ -1460,8 +1869,8 @@ CreateChatCompletionFilterByRouterAndTypedDict = TypeAliasType(
 )


-
-"
+CreateChatCompletionFilterByRouterChatCompletionsAnd = TypeAliasType(
+    "CreateChatCompletionFilterByRouterChatCompletionsAnd",
     Union[
         CreateChatCompletionAndEq,
         CreateChatCompletionAndNe,
@@ -1479,14 +1888,14 @@ CreateChatCompletionFilterByRouterAnd = TypeAliasType(
 class CreateChatCompletionFilterByAndTypedDict(TypedDict):
     r"""And"""

-    and_: List[Dict[str,
+    and_: List[Dict[str, CreateChatCompletionFilterByRouterChatCompletionsAndTypedDict]]


 class CreateChatCompletionFilterByAnd(BaseModel):
     r"""And"""

     and_: Annotated[
-        List[Dict[str,
+        List[Dict[str, CreateChatCompletionFilterByRouterChatCompletionsAnd]],
         pydantic.Field(alias="and"),
     ]

@@ -1503,48 +1912,50 @@ class CreateChatCompletion1Exists(BaseModel):
     exists: bool


-
-"
+CreateChatCompletion1RouterChatCompletionsNinTypedDict = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsNinTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletion1RouterChatCompletionsNin = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsNin", Union[str, float, bool]
 )


 class CreateChatCompletion1NinTypedDict(TypedDict):
     r"""Not in"""

-    nin: List[
+    nin: List[CreateChatCompletion1RouterChatCompletionsNinTypedDict]


 class CreateChatCompletion1Nin(BaseModel):
     r"""Not in"""

-    nin: List[
+    nin: List[CreateChatCompletion1RouterChatCompletionsNin]


-
-"
+CreateChatCompletion1RouterChatCompletionsInTypedDict = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsInTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletion1RouterChatCompletionsIn = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsIn", Union[str, float, bool]
 )


 class CreateChatCompletion1InTypedDict(TypedDict):
     r"""In"""

-    in_: List[
+    in_: List[CreateChatCompletion1RouterChatCompletionsInTypedDict]


 class CreateChatCompletion1In(BaseModel):
     r"""In"""

-    in_: Annotated[
+    in_: Annotated[
+        List[CreateChatCompletion1RouterChatCompletionsIn], pydantic.Field(alias="in")
+    ]


 class CreateChatCompletion1LteTypedDict(TypedDict):
@@ -1595,48 +2006,48 @@ class CreateChatCompletion1Gt(BaseModel):
     gt: float


-
-"
+CreateChatCompletion1RouterChatCompletionsNeTypedDict = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsNeTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletion1RouterChatCompletionsNe = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsNe", Union[str, float, bool]
 )


 class CreateChatCompletion1NeTypedDict(TypedDict):
     r"""Not equal to"""

-    ne:
+    ne: CreateChatCompletion1RouterChatCompletionsNeTypedDict


 class CreateChatCompletion1Ne(BaseModel):
     r"""Not equal to"""

-    ne:
+    ne: CreateChatCompletion1RouterChatCompletionsNe


-
-"
+CreateChatCompletion1RouterChatCompletionsEqTypedDict = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsEqTypedDict", Union[str, float, bool]
 )


-
-"
+CreateChatCompletion1RouterChatCompletionsEq = TypeAliasType(
+    "CreateChatCompletion1RouterChatCompletionsEq", Union[str, float, bool]
 )


 class CreateChatCompletion1EqTypedDict(TypedDict):
     r"""Equal to"""

-    eq:
+    eq: CreateChatCompletion1RouterChatCompletionsEqTypedDict


 class CreateChatCompletion1Eq(BaseModel):
     r"""Equal to"""

-    eq:
+    eq: CreateChatCompletion1RouterChatCompletionsEq


 CreateChatCompletionFilterBy1TypedDict = TypeAliasType(
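Taken together, the `Eq`/`Ne`/`In`/`Nin`/`Gt`/`Lte`/`Exists` models above compose into the `filter_by` clause of a knowledge-base search, with `and`/`or` mapping metadata keys to operators. A hedged sketch of a plain-dict payload matching these shapes (the metadata keys are invented for illustration):

```python
# Hypothetical filter: category == "faq" AND status not in ("archived", "draft").
# "category" and "status" are illustrative metadata fields, not SDK names.
filter_by = {
    "and": [
        {"category": {"eq": "faq"}},
        {"status": {"nin": ["archived", "draft"]}},
    ]
}
```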
@@ -1716,6 +2127,22 @@ class CreateChatCompletionSearchOptions(BaseModel):
     include_scores: Optional[bool] = None
     r"""Whether to include the scores in the chunk"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_vectors", "include_metadata", "include_scores"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionRerankConfigTypedDict(TypedDict):
     r"""Override the rerank configuration for this search. If not provided, will use the knowledge base configured rerank settings."""
@@ -1740,6 +2167,22 @@ class CreateChatCompletionRerankConfig(BaseModel):
     top_k: Optional[int] = 10
     r"""The number of top results to return after reranking. If not provided, will default to the knowledge base configured `top_k`."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["threshold", "top_k"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionAgenticRagConfigTypedDict(TypedDict):
     r"""Override the agentic RAG configuration for this search. If not provided, will use the knowledge base configured agentic RAG settings."""
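`CreateChatCompletionSearchOptions` and `CreateChatCompletionRerankConfig` gain the same None-dropping serializer, so per-request overrides only send the knobs you actually set. A hedged sketch of a knowledge-base entry using them; the `knowledge_id` field name is an assumption, while `threshold`, `top_k`, and `include_scores` come from the models above:

```python
# Illustrative per-request override: rerank down to the 5 best chunks
# scoring at least 0.4, and ask for scores back.
knowledge_base = {
    "knowledge_id": "kb_support_docs",               # hypothetical identifier field
    "rerank_config": {"threshold": 0.4, "top_k": 5},
    "search_options": {"include_scores": True},      # unset flags are omitted
}
```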
@@ -1804,94 +2247,168 @@ class CreateChatCompletionKnowledgeBases(BaseModel):
     query: Optional[str] = None
     r"""The query to use to search the knowledge base. If not provided we will use the last user message from the messages of the requests"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "top_k",
+                "threshold",
+                "search_type",
+                "filter_by",
+                "search_options",
+                "rerank_config",
+                "agentic_rag_config",
+                "query",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

-
+CreateChatCompletionLoadBalancerRouterChatCompletionsType = Literal["weight_based",]


-class
-    type: LoadBalancerType
+class CreateChatCompletionLoadBalancerRouterChatCompletionsModelsTypedDict(TypedDict):
     model: str
     r"""Model identifier for load balancing"""
     weight: NotRequired[float]
     r"""Weight assigned to this model for load balancing"""


-class
-    type: LoadBalancerType
-
+class CreateChatCompletionLoadBalancerRouterChatCompletionsModels(BaseModel):
     model: str
     r"""Model identifier for load balancing"""

     weight: Optional[float] = 0.5
     r"""Weight assigned to this model for load balancing"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreateChatCompletionLoadBalancerRouterChatCompletions1TypedDict(TypedDict):
+    type: CreateChatCompletionLoadBalancerRouterChatCompletionsType
+    models: List[CreateChatCompletionLoadBalancerRouterChatCompletionsModelsTypedDict]

-LoadBalancerTypedDict = LoadBalancer1TypedDict

+class CreateChatCompletionLoadBalancerRouterChatCompletions1(BaseModel):
+    type: CreateChatCompletionLoadBalancerRouterChatCompletionsType

-
+    models: List[CreateChatCompletionLoadBalancerRouterChatCompletionsModels]
+
+
+CreateChatCompletionRouterChatCompletionsLoadBalancerTypedDict = (
+    CreateChatCompletionLoadBalancerRouterChatCompletions1TypedDict
+)
+r"""Array of models with weights for load balancing requests"""


-
+CreateChatCompletionRouterChatCompletionsLoadBalancer = (
+    CreateChatCompletionLoadBalancerRouterChatCompletions1
+)
+r"""Array of models with weights for load balancing requests"""
+
+
+class CreateChatCompletionRouterChatCompletionsTimeoutTypedDict(TypedDict):
     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

     call_timeout: float
     r"""Timeout value in milliseconds"""


-class
+class CreateChatCompletionRouterChatCompletionsTimeout(BaseModel):
     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

     call_timeout: float
     r"""Timeout value in milliseconds"""


+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class CreateChatCompletionOrqTypedDict(TypedDict):
     r"""Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution."""

     name: NotRequired[str]
     r"""The name to display on the trace. If not specified, the default system name will be used."""
-    retry: NotRequired[
+    retry: NotRequired[CreateChatCompletionRouterChatCompletionsRetryTypedDict]
     r"""Retry configuration for the request"""
-    fallbacks: NotRequired[
+    fallbacks: NotRequired[
+        List[CreateChatCompletionRouterChatCompletionsFallbacksTypedDict]
+    ]
     r"""Array of fallback models to use if primary model fails"""
     prompt: NotRequired[PromptTypedDict]
     r"""Prompt configuration for the request"""
-    identity: NotRequired[
+    identity: NotRequired[PublicIdentityTypedDict]
     r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
-    contact: NotRequired[
+    contact: NotRequired[PublicContactTypedDict]
+    r"""@deprecated Use identity instead. Information about the contact making the request."""
     thread: NotRequired[CreateChatCompletionThreadTypedDict]
     r"""Thread information to group related requests"""
     inputs: NotRequired[InputsTypedDict]
     r"""Values to replace in the prompt messages using {{variableName}} syntax"""
-    cache: NotRequired[
+    cache: NotRequired[CreateChatCompletionRouterChatCompletionsCacheTypedDict]
     r"""Cache configuration for the request."""
     knowledge_bases: NotRequired[List[CreateChatCompletionKnowledgeBasesTypedDict]]
-    load_balancer: NotRequired[
+    load_balancer: NotRequired[
+        CreateChatCompletionRouterChatCompletionsLoadBalancerTypedDict
+    ]
     r"""Array of models with weights for load balancing requests"""
-    timeout: NotRequired[
+    timeout: NotRequired[CreateChatCompletionRouterChatCompletionsTimeoutTypedDict]
     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""


+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class CreateChatCompletionOrq(BaseModel):
     r"""Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution."""

     name: Optional[str] = None
     r"""The name to display on the trace. If not specified, the default system name will be used."""

-    retry: Optional[
+    retry: Optional[CreateChatCompletionRouterChatCompletionsRetry] = None
     r"""Retry configuration for the request"""

-    fallbacks: Optional[List[
+    fallbacks: Optional[List[CreateChatCompletionRouterChatCompletionsFallbacks]] = None
     r"""Array of fallback models to use if primary model fails"""

     prompt: Optional[Prompt] = None
     r"""Prompt configuration for the request"""

-    identity: Optional[
+    identity: Optional[PublicIdentity] = None
     r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""

-    contact:
+    contact: Annotated[
+        Optional[PublicContact],
+        pydantic.Field(
+            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+        ),
+    ] = None
+    r"""@deprecated Use identity instead. Information about the contact making the request."""

     thread: Optional[CreateChatCompletionThread] = None
     r"""Thread information to group related requests"""
@@ -1899,17 +2416,50 @@ class CreateChatCompletionOrq(BaseModel):
     inputs: Optional[Inputs] = None
     r"""Values to replace in the prompt messages using {{variableName}} syntax"""

-    cache: Optional[
+    cache: Optional[CreateChatCompletionRouterChatCompletionsCache] = None
     r"""Cache configuration for the request."""

     knowledge_bases: Optional[List[CreateChatCompletionKnowledgeBases]] = None

-    load_balancer: Optional[
+    load_balancer: Optional[CreateChatCompletionRouterChatCompletionsLoadBalancer] = (
+        None
+    )
     r"""Array of models with weights for load balancing requests"""

-    timeout: Optional[
+    timeout: Optional[CreateChatCompletionRouterChatCompletionsTimeout] = None
     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "name",
+                "retry",
+                "fallbacks",
+                "prompt",
+                "identity",
+                "contact",
+                "thread",
+                "inputs",
+                "cache",
+                "knowledge_bases",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CreateChatCompletionRequestBodyTypedDict(TypedDict):
     messages: List[CreateChatCompletionMessagesTypedDict]
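Both `CreateChatCompletionOrq` classes are now decorated `@deprecated`, and inside them `contact` is deprecated in favor of `identity`. A hedged migration sketch for callers still on the old field; the `{"id": ...}` shape for `PublicIdentity`/`PublicContact` is an assumption:

```python
# Before: contact-based tracking inside the deprecated `orq` envelope.
orq_old = {"contact": {"id": "user_42"}}   # `contact` is deprecated

# After: `identity` supersedes `contact`; unknown identities are created
# automatically, per the docstring on the `identity` field above.
orq_new = {"identity": {"id": "user_42"}}
```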
@@ -1918,6 +2468,8 @@ class CreateChatCompletionRequestBodyTypedDict(TypedDict):
     r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`. The AI Gateway offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the (Supported models)[/docs/proxy/supported-models] to browse available models."""
     metadata: NotRequired[Dict[str, str]]
     r"""Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can have a maximum length of 64 characters and values can have a maximum length of 512 characters."""
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[CreateChatCompletionAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -1974,6 +2526,16 @@ class CreateChatCompletionRequestBodyTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[CreateChatCompletionGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[CreateChatCompletionFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[CreateChatCompletionRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[CreateChatCompletionCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[CreateChatCompletionLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[CreateChatCompletionTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
     orq: NotRequired[CreateChatCompletionOrqTypedDict]
     r"""Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution."""
     stream: NotRequired[bool]
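With this hunk, `fallbacks`, `retry`, `cache`, `load_balancer`, and `timeout` become first-class request-body fields rather than living only under the deprecated `orq` envelope. A hedged sketch of a request body exercising them; the `cache`, `load_balancer`, and `timeout` shapes come from the models above, while the `fallbacks` and `retry` shapes are assumptions:

```python
body = {
    "model": "openai/gpt-4o",
    "messages": [{"role": "user", "content": "Summarize this ticket."}],
    "cache": {"type": "exact_match", "ttl": 600},   # ttl in seconds, max 3 days
    "timeout": {"call_timeout": 30000},             # milliseconds, per the docstring
    "load_balancer": {
        "type": "weight_based",
        "models": [
            {"model": "openai/gpt-4o", "weight": 0.7},
            {"model": "openai/gpt-4o-mini", "weight": 0.3},
        ],
    },
    "fallbacks": [{"model": "anthropic/claude-haiku-4-5-20251001"}],  # shape assumed
}
```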
@@ -1989,6 +2551,9 @@ class CreateChatCompletionRequestBody(BaseModel):
     metadata: Optional[Dict[str, str]] = None
     r"""Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can have a maximum length of 64 characters and values can have a maximum length of 512 characters."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[CreateChatCompletionAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -2068,86 +2633,111 @@ class CreateChatCompletionRequestBody(BaseModel):
     guardrails: Optional[List[CreateChatCompletionGuardrails]] = None
     r"""A list of guardrails to apply to the request."""

-
+    fallbacks: Optional[List[CreateChatCompletionFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[CreateChatCompletionRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[CreateChatCompletionCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[CreateChatCompletionLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[CreateChatCompletionTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    orq: Annotated[
+        Optional[CreateChatCompletionOrq],
+        pydantic.Field(
+            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+        ),
+    ] = None
     r"""Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution."""

     stream: Optional[bool] = False

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "metadata",
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tools",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+                "orq",
+                "stream",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-
+CreateChatCompletionRouterChatCompletionsFinishReason = Literal[
     "stop",
     "length",
     "tool_calls",
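The rewritten `serialize_model` above pairs an `optional_fields` set with a `nullable_fields` set: unset optionals are dropped, but a nullable field the caller explicitly set to None (so it appears in `__pydantic_fields_set__`) is still emitted as a null. A hedged sketch of that contract, assuming the model is importable from `orq_ai_sdk.models`:

```python
from orq_ai_sdk.models import CreateChatCompletionRequestBody  # import path assumed

messages = [{"role": "user", "content": "Hi"}]

# Left unset, optional fields are omitted entirely from the dump.
CreateChatCompletionRequestBody(model="openai/gpt-4o", messages=messages).model_dump()
# -> contains no "temperature", "seed", "stop", ... keys

# Explicitly assigned None survives, because "temperature" is in nullable_fields
# and now shows up in __pydantic_fields_set__.
CreateChatCompletionRequestBody(
    model="openai/gpt-4o", messages=messages, temperature=None
).model_dump()
# -> includes "temperature": None
```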
@@ -2157,7 +2747,7 @@ CreateChatCompletionRouterFinishReason = Literal[
 r"""The reason the model stopped generating tokens."""


-class CreateChatCompletionRouterResponseTopLogprobsTypedDict(TypedDict):
+class CreateChatCompletionRouterChatCompletionsResponseTopLogprobsTypedDict(TypedDict):
     token: str
     r"""The token."""
     logprob: float
@@ -2166,7 +2756,7 @@ class CreateChatCompletionRouterResponseTopLogprobsTypedDict(TypedDict):
     r"""A list of integers representing the UTF-8 bytes representation of the token."""


-class CreateChatCompletionRouterResponseTopLogprobs(BaseModel):
+class CreateChatCompletionRouterChatCompletionsResponseTopLogprobs(BaseModel):
     token: str
     r"""The token."""

@@ -2178,47 +2768,33 @@ class CreateChatCompletionRouterResponseTopLogprobs(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member

-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m


-class
+class CreateChatCompletionRouterChatCompletionsContentTypedDict(TypedDict):
     token: str
     r"""The token."""
     logprob: float
     r"""The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value -9999.0 is used to signify that the token is very unlikely."""
     bytes_: Nullable[List[float]]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
-    top_logprobs: List[
+    top_logprobs: List[
+        CreateChatCompletionRouterChatCompletionsResponseTopLogprobsTypedDict
+    ]
     r"""List of the most likely tokens and their log probability, at this token position."""


-class CreateChatCompletionRouterContent(BaseModel):
+class CreateChatCompletionRouterChatCompletionsContent(BaseModel):
     token: str
     r"""The token."""

@@ -2228,41 +2804,27 @@ class CreateChatCompletionRouterContent(BaseModel):
     bytes_: Annotated[Nullable[List[float]], pydantic.Field(alias="bytes")]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""

-    top_logprobs: List[
+    top_logprobs: List[CreateChatCompletionRouterChatCompletionsResponseTopLogprobs]
     r"""List of the most likely tokens and their log probability, at this token position."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)

-
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m


-class CreateChatCompletionRouterResponse200TopLogprobsTypedDict(TypedDict):
+class CreateChatCompletionRouterChatCompletionsResponse200TopLogprobsTypedDict(
+    TypedDict
+):
     token: str
     r"""The token."""
     logprob: float
@@ -2271,7 +2833,7 @@ class CreateChatCompletionRouterResponse200TopLogprobsTypedDict(TypedDict):
     r"""A list of integers representing the UTF-8 bytes representation of the token."""


-class CreateChatCompletionRouterResponse200TopLogprobs(BaseModel):
+class CreateChatCompletionRouterChatCompletionsResponse200TopLogprobs(BaseModel):
     token: str
     r"""The token."""

@@ -2283,47 +2845,33 @@ class CreateChatCompletionRouterResponse200TopLogprobs(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)

-
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m


-class
+class CreateChatCompletionRouterChatCompletionsRefusalTypedDict(TypedDict):
     token: str
     r"""The token."""
     logprob: float
     r"""The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value -9999.0 is used to signify that the token is very unlikely."""
     bytes_: Nullable[List[float]]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
-    top_logprobs: List[
+    top_logprobs: List[
+        CreateChatCompletionRouterChatCompletionsResponse200TopLogprobsTypedDict
+    ]
     r"""List of the most likely tokens and their log probability, at this token position."""


-class CreateChatCompletionRouterRefusal(BaseModel):
+class CreateChatCompletionRouterChatCompletionsRefusal(BaseModel):
     token: str
     r"""The token."""

@@ -2333,140 +2881,142 @@ class CreateChatCompletionRouterRefusal(BaseModel):
     bytes_: Annotated[Nullable[List[float]], pydantic.Field(alias="bytes")]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""

-    top_logprobs: List[
+    top_logprobs: List[CreateChatCompletionRouterChatCompletionsResponse200TopLogprobs]
     r"""List of the most likely tokens and their log probability, at this token position."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member

-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m


-class
+class CreateChatCompletionRouterChatCompletionsLogprobsTypedDict(TypedDict):
     r"""Log probability information for the choice."""

-    content: Nullable[List[
+    content: Nullable[List[CreateChatCompletionRouterChatCompletionsContentTypedDict]]
     r"""A list of message content tokens with log probability information."""
-    refusal: Nullable[List[
+    refusal: Nullable[List[CreateChatCompletionRouterChatCompletionsRefusalTypedDict]]
     r"""A list of message refusal tokens with log probability information."""


-class
+class CreateChatCompletionRouterChatCompletionsLogprobs(BaseModel):
     r"""Log probability information for the choice."""

-    content: Nullable[List[
+    content: Nullable[List[CreateChatCompletionRouterChatCompletionsContent]]
     r"""A list of message content tokens with log probability information."""

-    refusal: Nullable[List[
+    refusal: Nullable[List[CreateChatCompletionRouterChatCompletionsRefusal]]
     r"""A list of message refusal tokens with log probability information."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["content", "refusal"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member

-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val

         return m


-
+CreateChatCompletionRouterChatCompletionsResponse200Type = Literal["function",]
 r"""The type of the tool. Currently, only `function` is supported."""


-class
+class CreateChatCompletionRouterChatCompletionsResponseFunctionTypedDict(TypedDict):
     name: NotRequired[str]
     r"""The name of the function."""
     arguments: NotRequired[str]
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""


-class
+class CreateChatCompletionRouterChatCompletionsResponseFunction(BaseModel):
     name: Optional[str] = None
     r"""The name of the function."""

     arguments: Optional[str] = None
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val

-
+        return m
+
+
+class CreateChatCompletionRouterChatCompletionsToolCallsTypedDict(TypedDict):
     index: NotRequired[float]
     r"""The index of the tool call."""
     id: NotRequired[str]
     r"""The ID of the tool call."""
-    type: NotRequired[
+    type: NotRequired[CreateChatCompletionRouterChatCompletionsResponse200Type]
     r"""The type of the tool. Currently, only `function` is supported."""
-    function: NotRequired[
+    function: NotRequired[
+        CreateChatCompletionRouterChatCompletionsResponseFunctionTypedDict
+    ]
     thought_signature: NotRequired[str]
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models."""


-class
+class CreateChatCompletionRouterChatCompletionsToolCalls(BaseModel):
     index: Optional[float] = None
     r"""The index of the tool call."""

     id: Optional[str] = None
     r"""The ID of the tool call."""

-    type: Optional[
+    type: Optional[CreateChatCompletionRouterChatCompletionsResponse200Type] = None
     r"""The type of the tool. Currently, only `function` is supported."""

-    function: Optional[
+    function: Optional[CreateChatCompletionRouterChatCompletionsResponseFunction] = None

     thought_signature: Optional[str] = None
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["index", "id", "type", "function", "thought_signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)

-
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+CreateChatCompletionRouterChatCompletionsRole = Literal["assistant",]


-class CreateChatCompletionRouterResponseAudioTypedDict(TypedDict):
+class CreateChatCompletionRouterChatCompletionsResponseAudioTypedDict(TypedDict):
     r"""Audio response data in streaming mode."""

     id: NotRequired[str]
@@ -2475,7 +3025,7 @@ class CreateChatCompletionRouterResponseAudioTypedDict(TypedDict):
     expires_at: NotRequired[int]


-class CreateChatCompletionRouterResponseAudio(BaseModel):
+class CreateChatCompletionRouterChatCompletionsResponseAudio(BaseModel):
     r"""Audio response data in streaming mode."""

     id: Optional[str] = None
@@ -2486,6 +3036,22 @@ class CreateChatCompletionRouterResponseAudio(BaseModel):

     expires_at: Optional[int] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "transcript", "data", "expires_at"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class DeltaTypedDict(TypedDict):
     r"""A chat completion delta generated by streamed model responses."""
@@ -2493,15 +3059,19 @@ class DeltaTypedDict(TypedDict):
     content: NotRequired[Nullable[str]]
     r"""The contents of the chunk message."""
     refusal: NotRequired[Nullable[str]]
-    tool_calls: NotRequired[
-
+    tool_calls: NotRequired[
+        List[CreateChatCompletionRouterChatCompletionsToolCallsTypedDict]
+    ]
+    role: NotRequired[CreateChatCompletionRouterChatCompletionsRole]
     reasoning: NotRequired[str]
     r"""Internal thought process of the model"""
     reasoning_signature: NotRequired[str]
     r"""The signature holds a cryptographic token which verifies that the thinking block was generated by the model, and is verified when thinking is part of a multiturn conversation. This value should not be modified and should always be sent to the API when the reasoning is redacted. Currently only supported by `Anthropic`."""
     redacted_reasoning: NotRequired[str]
     r"""Occasionally the model's internal reasoning will be flagged by the safety systems of the provider. When this occurs, the provider will encrypt the reasoning. These redacted reasoning is decrypted when passed back to the API, allowing the model to continue its response without losing context."""
-    audio: NotRequired[
+    audio: NotRequired[
+        Nullable[CreateChatCompletionRouterChatCompletionsResponseAudioTypedDict]
+    ]
     r"""Audio response data in streaming mode."""

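Per the `Delta` models above, streamed chunks carry partial `content` plus partial `tool_calls` keyed by `index`, which the client is expected to stitch together. A hedged accumulation sketch; the `stream` iterator and its chunk objects are assumed to follow the choices/delta shape defined in this file:

```python
# Sketch: fold streamed deltas into a final message.
text_parts = []
tool_args = {}  # tool_call index -> accumulated JSON argument string

for chunk in stream:  # `stream` is a hypothetical iterable of parsed chunks
    delta = chunk.choices[0].delta
    if delta.content:
        text_parts.append(delta.content)
    for call in delta.tool_calls or []:
        idx = int(call.index or 0)
        part = call.function.arguments if call.function else ""
        tool_args[idx] = tool_args.get(idx, "") + (part or "")

final_text = "".join(text_parts)
```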
@@ -2513,9 +3083,11 @@ class Delta(BaseModel):
|
|
|
2513
3083
|
|
|
2514
3084
|
refusal: OptionalNullable[str] = UNSET
|
|
2515
3085
|
|
|
2516
|
-
tool_calls: Optional[List[
|
|
3086
|
+
tool_calls: Optional[List[CreateChatCompletionRouterChatCompletionsToolCalls]] = (
|
|
3087
|
+
None
|
|
3088
|
+
)
|
|
2517
3089
|
|
|
2518
|
-
role: Optional[
|
|
3090
|
+
role: Optional[CreateChatCompletionRouterChatCompletionsRole] = None
|
|
2519
3091
|
|
|
2520
3092
|
reasoning: Optional[str] = None
|
|
2521
3093
|
r"""Internal thought process of the model"""
|
|
@@ -2526,62 +3098,63 @@ class Delta(BaseModel):
     redacted_reasoning: Optional[str] = None
     r"""Occasionally the model's internal reasoning will be flagged by the safety systems of the provider. When this occurs, the provider will encrypt the reasoning. These redacted reasoning is decrypted when passed back to the API, allowing the model to continue its response without losing context."""
 
-    audio: OptionalNullable[
+    audio: OptionalNullable[CreateChatCompletionRouterChatCompletionsResponseAudio] = (
+        UNSET
+    )
     r"""Audio response data in streaming mode."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "content",
-            "refusal",
-            "tool_calls",
-            "role",
-            "reasoning",
-            "reasoning_signature",
-            "redacted_reasoning",
-            "audio",
-        ]
-        nullable_fields = ["content", "refusal", "audio"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "content",
+                "refusal",
+                "tool_calls",
+                "role",
+                "reasoning",
+                "reasoning_signature",
+                "redacted_reasoning",
+                "audio",
+            ]
+        )
+        nullable_fields = set(["content", "refusal", "audio"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class
-    finish_reason: Nullable[
+class CreateChatCompletionRouterChatCompletionsChoicesTypedDict(TypedDict):
+    finish_reason: Nullable[CreateChatCompletionRouterChatCompletionsFinishReason]
     r"""The reason the model stopped generating tokens."""
     delta: DeltaTypedDict
     r"""A chat completion delta generated by streamed model responses."""
     index: NotRequired[float]
     r"""The index of the choice in the list of choices."""
-    logprobs: NotRequired[
+    logprobs: NotRequired[
+        Nullable[CreateChatCompletionRouterChatCompletionsLogprobsTypedDict]
+    ]
     r"""Log probability information for the choice."""
 
 
-class
-    finish_reason: Nullable[
+class CreateChatCompletionRouterChatCompletionsChoices(BaseModel):
+    finish_reason: Nullable[CreateChatCompletionRouterChatCompletionsFinishReason]
     r"""The reason the model stopped generating tokens."""
 
     delta: Delta
@@ -2590,48 +3163,45 @@ class CreateChatCompletionRouterChoices(BaseModel):
     index: Optional[float] = 0
     r"""The index of the choice in the list of choices."""
 
-    logprobs: OptionalNullable[
+    logprobs: OptionalNullable[CreateChatCompletionRouterChatCompletionsLogprobs] = (
+        UNSET
+    )
     r"""Log probability information for the choice."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["index", "logprobs"]
-        nullable_fields = ["finish_reason", "logprobs"]
-        null_default_fields = []
-
+        optional_fields = set(["index", "logprobs"])
+        nullable_fields = set(["finish_reason", "logprobs"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsPromptTokensDetailsTypedDict(TypedDict):
     cached_tokens: NotRequired[Nullable[int]]
     cache_creation_tokens: NotRequired[Nullable[int]]
     audio_tokens: NotRequired[Nullable[int]]
     r"""The number of audio input tokens consumed by the request."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsPromptTokensDetails(BaseModel):
     cached_tokens: OptionalNullable[int] = UNSET
 
     cache_creation_tokens: OptionalNullable[int] = UNSET
@@ -2641,36 +3211,37 @@ class CreateChatCompletionRouterPromptTokensDetails(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
-        nullable_fields = ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
-        null_default_fields = []
-
+        optional_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
+        nullable_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsCompletionTokensDetailsTypedDict(
+    TypedDict
+):
     reasoning_tokens: NotRequired[Nullable[float]]
     accepted_prediction_tokens: NotRequired[Nullable[float]]
     rejected_prediction_tokens: NotRequired[Nullable[float]]
@@ -2678,7 +3249,7 @@ class CreateChatCompletionRouterCompletionTokensDetailsTypedDict(TypedDict):
     r"""The number of audio output tokens produced by the response."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsCompletionTokensDetails(BaseModel):
     reasoning_tokens: OptionalNullable[float] = UNSET
 
     accepted_prediction_tokens: OptionalNullable[float] = UNSET
@@ -2690,46 +3261,45 @@ class CreateChatCompletionRouterCompletionTokensDetails(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "reasoning_tokens",
-            "accepted_prediction_tokens",
-            "rejected_prediction_tokens",
-            "audio_tokens",
-        ]
-        nullable_fields = [
-            "reasoning_tokens",
-            "accepted_prediction_tokens",
-            "rejected_prediction_tokens",
-            "audio_tokens",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsUsageTypedDict(TypedDict):
     r"""Usage statistics for the completion request."""
 
     completion_tokens: NotRequired[float]
@@ -2739,14 +3309,16 @@ class CreateChatCompletionRouterUsageTypedDict(TypedDict):
     total_tokens: NotRequired[float]
     r"""Total number of tokens used in the request (prompt + completion)."""
     prompt_tokens_details: NotRequired[
-        Nullable[
+        Nullable[CreateChatCompletionRouterChatCompletionsPromptTokensDetailsTypedDict]
     ]
     completion_tokens_details: NotRequired[
-        Nullable[
+        Nullable[
+            CreateChatCompletionRouterChatCompletionsCompletionTokensDetailsTypedDict
+        ]
     ]
 
 
-class
+class CreateChatCompletionRouterChatCompletionsUsage(BaseModel):
     r"""Usage statistics for the completion request."""
 
     completion_tokens: Optional[float] = None
@@ -2759,51 +3331,48 @@ class CreateChatCompletionRouterUsage(BaseModel):
     r"""Total number of tokens used in the request (prompt + completion)."""
 
     prompt_tokens_details: OptionalNullable[
-
+        CreateChatCompletionRouterChatCompletionsPromptTokensDetails
     ] = UNSET
 
     completion_tokens_details: OptionalNullable[
-
+        CreateChatCompletionRouterChatCompletionsCompletionTokensDetails
     ] = UNSET
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "completion_tokens",
-            "prompt_tokens",
-            "total_tokens",
-            "prompt_tokens_details",
-            "completion_tokens_details",
-        ]
-        nullable_fields = ["prompt_tokens_details", "completion_tokens_details"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "completion_tokens",
+                "prompt_tokens",
+                "total_tokens",
+                "prompt_tokens_details",
+                "completion_tokens_details",
+            ]
+        )
+        nullable_fields = set(["prompt_tokens_details", "completion_tokens_details"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-
+CreateChatCompletionRouterChatCompletionsObject = Literal["chat.completion.chunk",]
 
 
 class CreateChatCompletionDataTypedDict(TypedDict):
@@ -2811,16 +3380,18 @@ class CreateChatCompletionDataTypedDict(TypedDict):
 
     id: str
     r"""A unique identifier for the chat completion."""
-    choices: List[
+    choices: List[CreateChatCompletionRouterChatCompletionsChoicesTypedDict]
     r"""A list of chat completion choices. Can contain more than one elements if n is greater than 1. Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}."""
     created: float
     r"""The Unix timestamp (in seconds) of when the chat completion was created."""
     model: str
     r"""The model used for the chat completion."""
-    object:
+    object: CreateChatCompletionRouterChatCompletionsObject
     system_fingerprint: NotRequired[Nullable[str]]
     r"""This fingerprint represents the backend configuration that the model runs with."""
-    usage: NotRequired[
+    usage: NotRequired[
+        Nullable[CreateChatCompletionRouterChatCompletionsUsageTypedDict]
+    ]
     r"""Usage statistics for the completion request."""
 
 
@@ -2830,7 +3401,7 @@ class CreateChatCompletionData(BaseModel):
     id: str
     r"""A unique identifier for the chat completion."""
 
-    choices: List[
+    choices: List[CreateChatCompletionRouterChatCompletionsChoices]
     r"""A list of chat completion choices. Can contain more than one elements if n is greater than 1. Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}."""
 
     created: float
@@ -2839,58 +3410,69 @@ class CreateChatCompletionData(BaseModel):
     model: str
     r"""The model used for the chat completion."""
 
-    object:
+    object: CreateChatCompletionRouterChatCompletionsObject
 
     system_fingerprint: OptionalNullable[str] = UNSET
     r"""This fingerprint represents the backend configuration that the model runs with."""
 
-    usage: OptionalNullable[
+    usage: OptionalNullable[CreateChatCompletionRouterChatCompletionsUsage] = UNSET
     r"""Usage statistics for the completion request."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["system_fingerprint", "usage"]
-        nullable_fields = ["system_fingerprint", "usage"]
-        null_default_fields = []
-
+        optional_fields = set(["system_fingerprint", "usage"])
+        nullable_fields = set(["system_fingerprint", "usage"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponseBodyTypedDict(TypedDict):
     r"""Represents a streamed chunk of a chat completion response returned by model, based on the provided input."""
 
     data: NotRequired[CreateChatCompletionDataTypedDict]
     r"""Represents a streamed chunk of a chat completion response returned by model, based on the provided input."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsResponseBody(BaseModel):
     r"""Represents a streamed chunk of a chat completion response returned by model, based on the provided input."""
 
     data: Optional[CreateChatCompletionData] = None
     r"""Represents a streamed chunk of a chat completion response returned by model, based on the provided input."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["data"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 CreateChatCompletionFinishReason = Literal[
     "stop",
@@ -2902,29 +3484,45 @@ CreateChatCompletionFinishReason = Literal[
 r"""The reason the model stopped generating tokens."""
 
 
-
+CreateChatCompletionRouterChatCompletionsResponseType = Literal["function",]
 
 
-class
+class CreateChatCompletionRouterChatCompletionsFunctionTypedDict(TypedDict):
     name: NotRequired[str]
     r"""The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
     arguments: NotRequired[str]
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsFunction(BaseModel):
     name: Optional[str] = None
     r"""The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
 
     arguments: Optional[str] = None
     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CreateChatCompletionToolCallsTypedDict(TypedDict):
     index: NotRequired[float]
     id: NotRequired[str]
-    type: NotRequired[
-    function: NotRequired[
+    type: NotRequired[CreateChatCompletionRouterChatCompletionsResponseType]
+    function: NotRequired[CreateChatCompletionRouterChatCompletionsFunctionTypedDict]
     thought_signature: NotRequired[str]
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
 
@@ -2934,18 +3532,34 @@ class CreateChatCompletionToolCalls(BaseModel):
 
     id: Optional[str] = None
 
-    type: Optional[
+    type: Optional[CreateChatCompletionRouterChatCompletionsResponseType] = None
 
-    function: Optional[
+    function: Optional[CreateChatCompletionRouterChatCompletionsFunction] = None
 
     thought_signature: Optional[str] = None
     r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["index", "id", "type", "function", "thought_signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 CreateChatCompletionRole = Literal["assistant",]
 
 
-class
+class CreateChatCompletionRouterChatCompletionsAudioTypedDict(TypedDict):
     r"""If the audio output modality is requested, this object contains data about the audio response from the model."""
 
     id: str
@@ -2954,7 +3568,7 @@ class CreateChatCompletionRouterAudioTypedDict(TypedDict):
     transcript: str
 
 
-class
+class CreateChatCompletionRouterChatCompletionsAudio(BaseModel):
     r"""If the audio output modality is requested, this object contains data about the audio response from the model."""
 
     id: str
@@ -2979,7 +3593,9 @@ class CreateChatCompletionMessageTypedDict(TypedDict):
     r"""The signature holds a cryptographic token which verifies that the thinking block was generated by the model, and is verified when thinking is part of a multiturn conversation. This value should not be modified and should always be sent to the API when the reasoning is redacted. Currently only supported by `Anthropic`."""
     redacted_reasoning: NotRequired[str]
     r"""Occasionally the model's internal reasoning will be flagged by the safety systems of the provider. When this occurs, the provider will encrypt the reasoning. These redacted reasoning is decrypted when passed back to the API, allowing the model to continue its response without losing context."""
-    audio: NotRequired[
+    audio: NotRequired[
+        Nullable[CreateChatCompletionRouterChatCompletionsAudioTypedDict]
+    ]
     r"""If the audio output modality is requested, this object contains data about the audio response from the model."""
 
 
@@ -3003,51 +3619,44 @@ class CreateChatCompletionMessage(BaseModel):
     redacted_reasoning: Optional[str] = None
     r"""Occasionally the model's internal reasoning will be flagged by the safety systems of the provider. When this occurs, the provider will encrypt the reasoning. These redacted reasoning is decrypted when passed back to the API, allowing the model to continue its response without losing context."""
 
-    audio: OptionalNullable[
+    audio: OptionalNullable[CreateChatCompletionRouterChatCompletionsAudio] = UNSET
     r"""If the audio output modality is requested, this object contains data about the audio response from the model."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "content",
-            "refusal",
-            "tool_calls",
-            "role",
-            "reasoning",
-            "reasoning_signature",
-            "redacted_reasoning",
-            "audio",
-        ]
-        nullable_fields = [
-            "content",
-            "refusal",
-            "reasoning",
-            "reasoning_signature",
-            "audio",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "content",
+                "refusal",
+                "tool_calls",
+                "role",
+                "reasoning",
+                "reasoning_signature",
+                "redacted_reasoning",
+                "audio",
+            ]
+        )
+        nullable_fields = set(
+            ["content", "refusal", "reasoning", "reasoning_signature", "audio"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -3073,30 +3682,14 @@ class CreateChatCompletionTopLogprobs(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
@@ -3128,36 +3721,20 @@ class CreateChatCompletionContent(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
 
 
-class
+class CreateChatCompletionRouterChatCompletionsTopLogprobsTypedDict(TypedDict):
     token: str
     r"""The token."""
     logprob: float
@@ -3166,7 +3743,7 @@ class CreateChatCompletionRouterTopLogprobsTypedDict(TypedDict):
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
 
 
-class
+class CreateChatCompletionRouterChatCompletionsTopLogprobs(BaseModel):
     token: str
     r"""The token."""
 
@@ -3178,30 +3755,14 @@ class CreateChatCompletionRouterTopLogprobs(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
@@ -3214,7 +3775,7 @@ class CreateChatCompletionRefusalTypedDict(TypedDict):
     r"""The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value -9999.0 is used to signify that the token is very unlikely."""
     bytes_: Nullable[List[float]]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
-    top_logprobs: List[
+    top_logprobs: List[CreateChatCompletionRouterChatCompletionsTopLogprobsTypedDict]
     r"""List of the most likely tokens and their log probability, at this token position."""
 
 
@@ -3228,35 +3789,19 @@ class CreateChatCompletionRefusal(BaseModel):
     bytes_: Annotated[Nullable[List[float]], pydantic.Field(alias="bytes")]
     r"""A list of integers representing the UTF-8 bytes representation of the token."""
 
-    top_logprobs: List[
+    top_logprobs: List[CreateChatCompletionRouterChatCompletionsTopLogprobs]
     r"""List of the most likely tokens and their log probability, at this token position."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["bytes"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
@@ -3282,30 +3827,14 @@ class CreateChatCompletionLogprobs(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = []
-        nullable_fields = ["content", "refusal"]
-        null_default_fields = []
-
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
+            if val != UNSET_SENTINEL:
                 m[k] = val
 
         return m
@@ -3337,31 +3866,26 @@ class CreateChatCompletionChoices(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["index", "logprobs"]
-        nullable_fields = ["finish_reason", "logprobs"]
-        null_default_fields = []
-
+        optional_fields = set(["index", "logprobs"])
+        nullable_fields = set(["finish_reason", "logprobs"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -3383,31 +3907,30 @@ class CreateChatCompletionPromptTokensDetails(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
-        nullable_fields = ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
-        null_default_fields = []
-
+        optional_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
+        nullable_fields = set(
+            ["cached_tokens", "cache_creation_tokens", "audio_tokens"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -3432,41 +3955,40 @@ class CreateChatCompletionCompletionTokensDetails(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "reasoning_tokens",
-            "accepted_prediction_tokens",
-            "rejected_prediction_tokens",
-            "audio_tokens",
-        ]
-        nullable_fields = [
-            "reasoning_tokens",
-            "accepted_prediction_tokens",
-            "rejected_prediction_tokens",
-            "audio_tokens",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "reasoning_tokens",
+                "accepted_prediction_tokens",
+                "rejected_prediction_tokens",
+                "audio_tokens",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -3510,37 +4032,34 @@ class CreateChatCompletionUsage(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "completion_tokens",
-            "prompt_tokens",
-            "total_tokens",
-            "prompt_tokens_details",
-            "completion_tokens_details",
-        ]
-        nullable_fields = ["prompt_tokens_details", "completion_tokens_details"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "completion_tokens",
+                "prompt_tokens",
+                "total_tokens",
+                "prompt_tokens_details",
+                "completion_tokens_details",
+            ]
+        )
+        nullable_fields = set(["prompt_tokens_details", "completion_tokens_details"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -3591,31 +4110,26 @@ class CreateChatCompletionResponseBody(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["system_fingerprint", "usage"]
-        nullable_fields = ["system_fingerprint", "usage"]
-        null_default_fields = []
-
+        optional_fields = set(["system_fingerprint", "usage"])
+        nullable_fields = set(["system_fingerprint", "usage"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -3625,9 +4139,11 @@ CreateChatCompletionResponseTypedDict = TypeAliasType(
     Union[
         CreateChatCompletionResponseBodyTypedDict,
         Union[
-            eventstreaming.EventStream[
+            eventstreaming.EventStream[
+                CreateChatCompletionRouterChatCompletionsResponseBodyTypedDict
+            ],
             eventstreaming.EventStreamAsync[
-
+                CreateChatCompletionRouterChatCompletionsResponseBodyTypedDict
             ],
         ],
     ],
@@ -3639,8 +4155,12 @@ CreateChatCompletionResponse = TypeAliasType(
    Union[
        CreateChatCompletionResponseBody,
        Union[
-            eventstreaming.EventStream[
-
+            eventstreaming.EventStream[
+                CreateChatCompletionRouterChatCompletionsResponseBody
+            ],
+            eventstreaming.EventStreamAsync[
+                CreateChatCompletionRouterChatCompletionsResponseBody
+            ],
        ],
    ],
 )
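Note: the change repeated across the serializers above is a move from list-based `optional_fields`/`nullable_fields`/`null_default_fields` bookkeeping to set lookups plus an explicit `__pydantic_fields_set__` check. The following is a minimal, self-contained sketch of that pattern, not SDK code: the model name and sentinel value are illustrative stand-ins (the generated models use the SDK's own `UNSET_SENTINEL`), shown here only to make the omission semantics concrete.

```python
# Sketch of the serializer pattern introduced in this version (pydantic v2).
# "ExampleDelta" and the sentinel string are hypothetical, not SDK names.
from typing import Optional

from pydantic import BaseModel, model_serializer

UNSET_SENTINEL = "~?~unset~?~"  # stand-in for the SDK's sentinel value


class ExampleDelta(BaseModel):
    content: Optional[str] = None    # optional and nullable
    reasoning: Optional[str] = None  # optional only

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["content", "reasoning"])
        nullable_fields = set(["content"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # Emit null only for nullable fields the caller set explicitly;
            # optional fields that were never set are dropped from the output.
            is_nullable_and_explicitly_set = k in nullable_fields and bool(
                self.__pydantic_fields_set__.intersection({n})
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


print(ExampleDelta(content=None).model_dump())  # {'content': None}
print(ExampleDelta().model_dump())              # {}
```

Compared with the removed `null_default_fields` approach, an unset optional field and an explicitly-null one are now distinguished purely through pydantic's `__pydantic_fields_set__`, so the generated models no longer need per-model default bookkeeping.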