orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/agents.py +186 -186
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5746 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1945 -383
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1381 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +407 -128
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2759 -1251
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1696 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1679 -230
- orq_ai_sdk/models/getpromptversionop.py +1676 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +805 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1690 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1462 -196
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1439 -194
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1968 -397
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2854 -1448
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +364 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
- orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
@@ -17,6 +17,14 @@ from .redactedreasoningpartschema import (
 )
 from .refusalpartschema import RefusalPartSchema, RefusalPartSchemaTypedDict
 from .textcontentpartschema import TextContentPartSchema, TextContentPartSchemaTypedDict
+from .thinkingconfigdisabledschema import (
+    ThinkingConfigDisabledSchema,
+    ThinkingConfigDisabledSchemaTypedDict,
+)
+from .thinkingconfigenabledschema import (
+    ThinkingConfigEnabledSchema,
+    ThinkingConfigEnabledSchemaTypedDict,
+)
 from orq_ai_sdk.types import (
     BaseModel,
     Nullable,
@@ -89,1284 +97,1348 @@ class CreatePromptMetadata(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["use_cases", "language"]
-        nullable_fields = ["language"]
-        null_default_fields = []
-
+        optional_fields = set(["use_cases", "language"])
+        nullable_fields = set(["language"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


-
-
-    "assistant",
-    "user",
-    "exception",
-    "tool",
-    "prompt",
-    "correction",
-    "expected_output",
-]
-r"""The role of the prompt message"""
+CreatePromptMessagesPromptsRequestRequestBodyRole = Literal["tool",]
+r"""The role of the messages author, in this case tool."""


-
-    r"""The type of the content part. Always `file`."""
+CreatePromptContentPromptsRequest2TypedDict = TextContentPartSchemaTypedDict


-
-    file_data: NotRequired[str]
-    r"""The file data as a data URI string in the format 'data:<mime-type>;base64,<base64-encoded-data>'. Example: 'data:image/png;base64,iVBORw0KGgoAAAANS...'"""
-    uri: NotRequired[str]
-    r"""URL to the file. Only supported by Anthropic Claude models for PDF files."""
-    mime_type: NotRequired[str]
-    r"""MIME type of the file (e.g., application/pdf, image/png)"""
-    filename: NotRequired[str]
-    r"""The name of the file, used when passing the file to the model as a string."""
+CreatePromptContentPromptsRequest2 = TextContentPartSchema


-
-
-
+CreatePromptMessagesPromptsRequestRequestBodyContentTypedDict = TypeAliasType(
+    "CreatePromptMessagesPromptsRequestRequestBodyContentTypedDict",
+    Union[str, List[CreatePromptContentPromptsRequest2TypedDict]],
+)
+r"""The contents of the tool message."""

-    uri: Optional[str] = None
-    r"""URL to the file. Only supported by Anthropic Claude models for PDF files."""

-
-
+CreatePromptMessagesPromptsRequestRequestBodyContent = TypeAliasType(
+    "CreatePromptMessagesPromptsRequestRequestBodyContent",
+    Union[str, List[CreatePromptContentPromptsRequest2]],
+)
+r"""The contents of the tool message."""

-    filename: Optional[str] = None
-    r"""The name of the file, used when passing the file to the model as a string."""

+CreatePromptMessagesPromptsType = Literal["ephemeral",]
+r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""

-class Two3TypedDict(TypedDict):
-    type: CreatePrompt2PromptsRequestType
-    r"""The type of the content part. Always `file`."""
-    file: CreatePrompt2FileTypedDict

+CreatePromptMessagesTTL = Literal[
+    "5m",
+    "1h",
+]
+r"""The time-to-live for the cache control breakpoint. This may be one of the following values:

-
-
-    r"""The type of the content part. Always `file`."""
+- `5m`: 5 minutes
+- `1h`: 1 hour

-
+Defaults to `5m`. Only supported by `Anthropic` Claude models.
+"""


-
+class CreatePromptMessagesCacheControlTypedDict(TypedDict):
+    type: CreatePromptMessagesPromptsType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+    ttl: NotRequired[CreatePromptMessagesTTL]
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:

+    - `5m`: 5 minutes
+    - `1h`: 1 hour

-
-
-    r"""Either a URL of the image or the base64 encoded data URI."""
-    detail: NotRequired[str]
-    r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """


-class
-
-    r"""
+class CreatePromptMessagesCacheControl(BaseModel):
+    type: CreatePromptMessagesPromptsType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""

-
-    r"""
+    ttl: Optional[CreatePromptMessagesTTL] = "5m"
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:

+    - `5m`: 5 minutes
+    - `1h`: 1 hour

-
-
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """

-
-
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}

+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)

-
-
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val

-
+        return m

-    image_url: CreatePrompt2ImageURL

+class CreatePromptMessagesToolMessageTypedDict(TypedDict):
+    role: CreatePromptMessagesPromptsRequestRequestBodyRole
+    r"""The role of the messages author, in this case tool."""
+    content: CreatePromptMessagesPromptsRequestRequestBodyContentTypedDict
+    r"""The contents of the tool message."""
+    tool_call_id: Nullable[str]
+    r"""Tool call that this message is responding to."""
+    cache_control: NotRequired[CreatePromptMessagesCacheControlTypedDict]

-CreatePrompt2Type = Literal["text",]

+class CreatePromptMessagesToolMessage(BaseModel):
+    role: CreatePromptMessagesPromptsRequestRequestBodyRole
+    r"""The role of the messages author, in this case tool."""

-
-    r"""
+    content: CreatePromptMessagesPromptsRequestRequestBodyContent
+    r"""The contents of the tool message."""

-
-
+    tool_call_id: Nullable[str]
+    r"""Tool call that this message is responding to."""

+    cache_control: Optional[CreatePromptMessagesCacheControl] = None

-
-
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["cache_control"])
+        nullable_fields = set(["tool_call_id"])
+        serialized = handler(self)
+        m = {}

-
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

-
+        return m


-
-    "
-    Union[
+CreatePromptContentPrompts2TypedDict = TypeAliasType(
+    "CreatePromptContentPrompts2TypedDict",
+    Union[
+        RefusalPartSchemaTypedDict,
+        RedactedReasoningPartSchemaTypedDict,
+        TextContentPartSchemaTypedDict,
+        ReasoningPartSchemaTypedDict,
+    ],
 )


-
+CreatePromptContentPrompts2 = Annotated[
     Union[
-        Annotated[
-        Annotated[
-        Annotated[
+        Annotated[TextContentPartSchema, Tag("text")],
+        Annotated[RefusalPartSchema, Tag("refusal")],
+        Annotated[ReasoningPartSchema, Tag("reasoning")],
+        Annotated[RedactedReasoningPartSchema, Tag("redacted_reasoning")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]


-
-    "
+CreatePromptMessagesPromptsRequestContentTypedDict = TypeAliasType(
+    "CreatePromptMessagesPromptsRequestContentTypedDict",
+    Union[str, List[CreatePromptContentPrompts2TypedDict]],
 )
-r"""The contents of the
+r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""


-
-    "
+CreatePromptMessagesPromptsRequestContent = TypeAliasType(
+    "CreatePromptMessagesPromptsRequestContent",
+    Union[str, List[CreatePromptContentPrompts2]],
 )
-r"""The contents of the
+r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""


-
+CreatePromptMessagesPromptsRequestRole = Literal["assistant",]
+r"""The role of the messages author, in this case `assistant`."""


-class
-
-    arguments: str
-    r"""JSON string arguments for the functions"""
+class CreatePromptMessagesAudioTypedDict(TypedDict):
+    r"""Data about a previous audio response from the model."""

+    id: str
+    r"""Unique identifier for a previous audio response from the model."""

-class CreatePromptFunction(BaseModel):
-    name: str

-
-    r"""
+class CreatePromptMessagesAudio(BaseModel):
+    r"""Data about a previous audio response from the model."""

+    id: str
+    r"""Unique identifier for a previous audio response from the model."""

-
-
-
-    id: NotRequired[str]
-    index: NotRequired[float]
+
+CreatePromptMessagesType = Literal["function",]
+r"""The type of the tool. Currently, only `function` is supported."""


-class
-
+class CreatePromptMessagesFunctionTypedDict(TypedDict):
+    name: NotRequired[str]
+    r"""The name of the function to call."""
+    arguments: NotRequired[str]
+    r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""

-    function: CreatePromptFunction

-
+class CreatePromptMessagesFunction(BaseModel):
+    name: Optional[str] = None
+    r"""The name of the function to call."""

-
+    arguments: Optional[str] = None
+    r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}

-
-
-
-    content: Nullable[CreatePromptContentTypedDict]
-    r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
-    tool_calls: NotRequired[List[CreatePromptToolCallsTypedDict]]
-    tool_call_id: NotRequired[Nullable[str]]
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)

+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val

-
-    role: CreatePromptRole
-    r"""The role of the prompt message"""
+        return m

-    content: Nullable[CreatePromptContent]
-    r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""

-
+class CreatePromptMessagesToolCallsTypedDict(TypedDict):
+    id: str
+    r"""The ID of the tool call."""
+    type: CreatePromptMessagesType
+    r"""The type of the tool. Currently, only `function` is supported."""
+    function: CreatePromptMessagesFunctionTypedDict
+    thought_signature: NotRequired[str]
+    r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""

-
+
+class CreatePromptMessagesToolCalls(BaseModel):
+    id: str
+    r"""The ID of the tool call."""
+
+    type: CreatePromptMessagesType
+    r"""The type of the tool. Currently, only `function` is supported."""
+
+    function: CreatePromptMessagesFunction
+
+    thought_signature: Optional[str] = None
+    r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["
-        nullable_fields = ["content", "tool_call_id"]
-        null_default_fields = []
-
+        optional_fields = set(["thought_signature"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member

-            if val
-
-
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val

         return m


-
-
-    "
-
-    "
-]
-r"""
|
|
384
|
+
class CreatePromptMessagesAssistantMessageTypedDict(TypedDict):
|
|
385
|
+
role: CreatePromptMessagesPromptsRequestRole
|
|
386
|
+
r"""The role of the messages author, in this case `assistant`."""
|
|
387
|
+
content: NotRequired[Nullable[CreatePromptMessagesPromptsRequestContentTypedDict]]
|
|
388
|
+
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
389
|
+
refusal: NotRequired[Nullable[str]]
|
|
390
|
+
r"""The refusal message by the assistant."""
|
|
391
|
+
name: NotRequired[str]
|
|
392
|
+
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
393
|
+
audio: NotRequired[Nullable[CreatePromptMessagesAudioTypedDict]]
|
|
394
|
+
r"""Data about a previous audio response from the model."""
|
|
395
|
+
tool_calls: NotRequired[List[CreatePromptMessagesToolCallsTypedDict]]
|
|
396
|
+
r"""The tool calls generated by the model, such as function calls."""
|
|
346
397
|
|
|
347
398
|
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
"
|
|
351
|
-
"srt",
|
|
352
|
-
"verbose_json",
|
|
353
|
-
"vtt",
|
|
354
|
-
]
|
|
399
|
+
class CreatePromptMessagesAssistantMessage(BaseModel):
|
|
400
|
+
role: CreatePromptMessagesPromptsRequestRole
|
|
401
|
+
r"""The role of the messages author, in this case `assistant`."""
|
|
355
402
|
|
|
403
|
+
content: OptionalNullable[CreatePromptMessagesPromptsRequestContent] = UNSET
|
|
404
|
+
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
356
405
|
|
|
357
|
-
|
|
358
|
-
"
|
|
359
|
-
"base64_json",
|
|
360
|
-
]
|
|
406
|
+
refusal: OptionalNullable[str] = UNSET
|
|
407
|
+
r"""The refusal message by the assistant."""
|
|
361
408
|
|
|
409
|
+
name: Optional[str] = None
|
|
410
|
+
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
362
411
|
|
|
363
|
-
|
|
364
|
-
"
|
|
365
|
-
"opus",
|
|
366
|
-
"aac",
|
|
367
|
-
"flac",
|
|
368
|
-
"wav",
|
|
369
|
-
"pcm",
|
|
370
|
-
]
|
|
412
|
+
audio: OptionalNullable[CreatePromptMessagesAudio] = UNSET
|
|
413
|
+
r"""Data about a previous audio response from the model."""
|
|
371
414
|
|
|
415
|
+
tool_calls: Optional[List[CreatePromptMessagesToolCalls]] = None
|
|
416
|
+
r"""The tool calls generated by the model, such as function calls."""
|
|
372
417
|
|
|
373
|
-
|
|
418
|
+
@model_serializer(mode="wrap")
|
|
419
|
+
def serialize_model(self, handler):
|
|
420
|
+
optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
|
|
421
|
+
nullable_fields = set(["content", "refusal", "audio"])
|
|
422
|
+
serialized = handler(self)
|
|
423
|
+
m = {}
|
|
374
424
|
|
|
425
|
+
for n, f in type(self).model_fields.items():
|
|
426
|
+
k = f.alias or n
|
|
427
|
+
val = serialized.get(k)
|
|
428
|
+
is_nullable_and_explicitly_set = (
|
|
429
|
+
k in nullable_fields
|
|
430
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
431
|
+
)
|
|
432
|
+
|
|
433
|
+
if val != UNSET_SENTINEL:
|
|
434
|
+
if (
|
|
435
|
+
val is not None
|
|
436
|
+
or k not in optional_fields
|
|
437
|
+
or is_nullable_and_explicitly_set
|
|
438
|
+
):
|
|
439
|
+
m[k] = val
|
|
375
440
|
|
|
376
|
-
|
|
377
|
-
type: CreatePromptResponseFormatPromptsRequestType
|
|
441
|
+
return m
|
|
378
442
|
|
|
379
443
|
|
|
380
|
-
|
|
381
|
-
|
|
444
|
+
CreatePromptMessagesPromptsRole = Literal["user",]
|
|
445
|
+
r"""The role of the messages author, in this case `user`."""
|
|
382
446
|
|
|
383
447
|
|
|
384
|
-
|
|
448
|
+
CreatePrompt2Type = Literal["file",]
|
|
449
|
+
r"""The type of the content part. Always `file`."""
|
|
385
450
|
|
|
386
451
|
|
|
387
|
-
|
|
388
|
-
|
|
452
|
+
CreatePrompt2PromptsType = Literal["ephemeral",]
|
|
453
|
+
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
389
454
|
|
|
390
455
|
|
|
391
|
-
|
|
392
|
-
|
|
456
|
+
CreatePrompt2TTL = Literal[
|
|
457
|
+
"5m",
|
|
458
|
+
"1h",
|
|
459
|
+
]
|
|
460
|
+
r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
|
|
393
461
|
|
|
462
|
+
- `5m`: 5 minutes
|
|
463
|
+
- `1h`: 1 hour
|
|
394
464
|
|
|
395
|
-
|
|
465
|
+
Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
466
|
+
"""
|
|
396
467
|
|
|
397
468
|
|
|
398
|
-
class
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
469
|
+
class CreatePrompt2CacheControlTypedDict(TypedDict):
|
|
470
|
+
type: CreatePrompt2PromptsType
|
|
471
|
+
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
472
|
+
ttl: NotRequired[CreatePrompt2TTL]
|
|
473
|
+
r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
|
|
403
474
|
|
|
475
|
+
- `5m`: 5 minutes
|
|
476
|
+
- `1h`: 1 hour
|
|
404
477
|
|
|
405
|
-
|
|
406
|
-
|
|
478
|
+
Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
479
|
+
"""
|
|
407
480
|
|
|
408
|
-
schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
|
|
409
481
|
|
|
410
|
-
|
|
482
|
+
class CreatePrompt2CacheControl(BaseModel):
|
|
483
|
+
type: CreatePrompt2PromptsType
|
|
484
|
+
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
411
485
|
|
|
412
|
-
|
|
486
|
+
ttl: Optional[CreatePrompt2TTL] = "5m"
|
|
487
|
+
r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
|
|
413
488
|
|
|
489
|
+
- `5m`: 5 minutes
|
|
490
|
+
- `1h`: 1 hour
|
|
414
491
|
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
json_schema: CreatePromptResponseFormatJSONSchemaTypedDict
|
|
418
|
-
display_name: NotRequired[str]
|
|
492
|
+
Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
493
|
+
"""
|
|
419
494
|
|
|
495
|
+
@model_serializer(mode="wrap")
|
|
496
|
+
def serialize_model(self, handler):
|
|
497
|
+
optional_fields = set(["ttl"])
|
|
498
|
+
serialized = handler(self)
|
|
499
|
+
m = {}
|
|
420
500
|
|
|
421
|
-
|
|
422
|
-
|
|
501
|
+
for n, f in type(self).model_fields.items():
|
|
502
|
+
k = f.alias or n
|
|
503
|
+
val = serialized.get(k)
|
|
423
504
|
|
|
424
|
-
|
|
505
|
+
if val != UNSET_SENTINEL:
|
|
506
|
+
if val is not None or k not in optional_fields:
|
|
507
|
+
m[k] = val
|
|
425
508
|
|
|
426
|
-
|
|
509
|
+
return m
|
|
427
510
|
|
|
428
511
|
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
ResponseFormat4,
|
|
436
|
-
Five,
|
|
437
|
-
Six,
|
|
438
|
-
],
|
|
439
|
-
)
|
|
440
|
-
r"""An object specifying the format that the model must output.
|
|
512
|
+
class CreatePrompt24TypedDict(TypedDict):
|
|
513
|
+
type: CreatePrompt2Type
|
|
514
|
+
r"""The type of the content part. Always `file`."""
|
|
515
|
+
file: FileContentPartSchemaTypedDict
|
|
516
|
+
r"""File data for the content part. Must contain either file_data or uri, but not both."""
|
|
517
|
+
cache_control: NotRequired[CreatePrompt2CacheControlTypedDict]
|
|
441
518
|
|
|
442
|
-
Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema
|
|
443
519
|
|
|
444
|
-
|
|
520
|
+
class CreatePrompt24(BaseModel):
|
|
521
|
+
type: CreatePrompt2Type
|
|
522
|
+
r"""The type of the content part. Always `file`."""
|
|
445
523
|
|
|
446
|
-
|
|
447
|
-
"""
|
|
524
|
+
file: FileContentPartSchema
|
|
525
|
+
r"""File data for the content part. Must contain either file_data or uri, but not both."""
|
|
526
|
+
|
|
527
|
+
cache_control: Optional[CreatePrompt2CacheControl] = None
|
|
528
|
+
|
|
529
|
+
@model_serializer(mode="wrap")
|
|
530
|
+
def serialize_model(self, handler):
|
|
531
|
+
optional_fields = set(["cache_control"])
|
|
532
|
+
serialized = handler(self)
|
|
533
|
+
m = {}
|
|
534
|
+
|
|
535
|
+
for n, f in type(self).model_fields.items():
|
|
536
|
+
k = f.alias or n
|
|
537
|
+
val = serialized.get(k)
|
|
538
|
+
|
|
539
|
+
if val != UNSET_SENTINEL:
|
|
540
|
+
if val is not None or k not in optional_fields:
|
|
541
|
+
m[k] = val
|
|
542
|
+
|
|
543
|
+
return m
|
|
448
544
|
|
|
449
545
|
|
|
450
|
-
|
|
451
|
-
"
|
|
546
|
+
CreatePromptContent2TypedDict = TypeAliasType(
|
|
547
|
+
"CreatePromptContent2TypedDict",
|
|
452
548
|
Union[
|
|
453
|
-
|
|
549
|
+
AudioContentPartSchemaTypedDict,
|
|
550
|
+
TextContentPartSchemaTypedDict,
|
|
551
|
+
ImageContentPartSchemaTypedDict,
|
|
552
|
+
CreatePrompt24TypedDict,
|
|
454
553
|
],
|
|
455
554
|
)
|
|
456
|
-
r"""An object specifying the format that the model must output.
|
|
457
555
|
|
|
458
|
-
Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema
|
|
459
556
|
|
|
460
|
-
|
|
557
|
+
CreatePromptContent2 = Annotated[
|
|
558
|
+
Union[
|
|
559
|
+
Annotated[TextContentPartSchema, Tag("text")],
|
|
560
|
+
Annotated[ImageContentPartSchema, Tag("image_url")],
|
|
561
|
+
Annotated[AudioContentPartSchema, Tag("input_audio")],
|
|
562
|
+
Annotated[CreatePrompt24, Tag("file")],
|
|
563
|
+
],
|
|
564
|
+
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
565
|
+
]
|
|
461
566
|
|
|
462
|
-
Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
|
|
463
|
-
"""
|
|
464
567
|
|
|
568
|
+
CreatePromptMessagesPromptsContentTypedDict = TypeAliasType(
|
|
569
|
+
"CreatePromptMessagesPromptsContentTypedDict",
|
|
570
|
+
Union[str, List[CreatePromptContent2TypedDict]],
|
|
571
|
+
)
|
|
572
|
+
r"""The contents of the user message."""
|
|
465
573
|
|
|
466
|
-
PhotoRealVersion = Literal[
|
|
467
|
-
"v1",
|
|
468
|
-
"v2",
|
|
469
|
-
]
|
|
470
|
-
r"""The version of photoReal to use. Must be v1 or v2. Only available for `leonardoai` provider"""
|
|
471
574
|
|
|
575
|
+
CreatePromptMessagesPromptsContent = TypeAliasType(
|
|
576
|
+
"CreatePromptMessagesPromptsContent", Union[str, List[CreatePromptContent2]]
|
|
577
|
+
)
|
|
578
|
+
r"""The contents of the user message."""
|
|
472
579
|
|
|
473
|
-
EncodingFormat = Literal[
|
|
474
|
-
"float",
|
|
475
|
-
"base64",
|
|
476
|
-
]
|
|
477
|
-
r"""The format to return the embeddings"""
|
|
478
580
|
|
|
581
|
+
class CreatePromptMessagesUserMessageTypedDict(TypedDict):
|
|
582
|
+
role: CreatePromptMessagesPromptsRole
|
|
583
|
+
r"""The role of the messages author, in this case `user`."""
|
|
584
|
+
content: CreatePromptMessagesPromptsContentTypedDict
|
|
585
|
+
r"""The contents of the user message."""
|
|
586
|
+
name: NotRequired[str]
|
|
587
|
+
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
479
588
|
|
|
480
|
-
CreatePromptReasoningEffort = Literal[
|
|
481
|
-
"none",
|
|
482
|
-
"disable",
|
|
483
|
-
"minimal",
|
|
484
|
-
"low",
|
|
485
|
-
"medium",
|
|
486
|
-
"high",
|
|
487
|
-
]
|
|
488
|
-
r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
|
|
489
589
|
|
|
590
|
+
class CreatePromptMessagesUserMessage(BaseModel):
|
|
591
|
+
role: CreatePromptMessagesPromptsRole
|
|
592
|
+
r"""The role of the messages author, in this case `user`."""
|
|
490
593
|
|
|
491
|
-
|
|
492
|
-
"
|
|
493
|
-
"medium",
|
|
494
|
-
"high",
|
|
495
|
-
]
|
|
496
|
-
r"""Controls the verbosity of the model output."""
|
|
594
|
+
content: CreatePromptMessagesPromptsContent
|
|
595
|
+
r"""The contents of the user message."""
|
|
497
596
|
|
|
597
|
+
name: Optional[str] = None
|
|
598
|
+
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
498
599
|
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
600
|
+
@model_serializer(mode="wrap")
|
|
601
|
+
def serialize_model(self, handler):
|
|
602
|
+
optional_fields = set(["name"])
|
|
603
|
+
serialized = handler(self)
|
|
604
|
+
m = {}
|
|
504
605
|
|
|
606
|
+
for n, f in type(self).model_fields.items():
|
|
607
|
+
k = f.alias or n
|
|
608
|
+
val = serialized.get(k)
|
|
505
609
|
|
|
506
|
-
|
|
507
|
-
|
|
610
|
+
if val != UNSET_SENTINEL:
|
|
611
|
+
if val is not None or k not in optional_fields:
|
|
612
|
+
m[k] = val
|
|
508
613
|
|
|
509
|
-
|
|
510
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
511
|
-
max_tokens: NotRequired[float]
|
|
512
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
513
|
-
top_k: NotRequired[float]
|
|
514
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
515
|
-
top_p: NotRequired[float]
|
|
516
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
517
|
-
frequency_penalty: NotRequired[float]
|
|
518
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
519
|
-
presence_penalty: NotRequired[float]
|
|
520
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
521
|
-
num_images: NotRequired[float]
|
|
522
|
-
r"""Only supported on `image` models."""
|
|
523
|
-
seed: NotRequired[float]
|
|
524
|
-
r"""Best effort deterministic seed for the model. Currently only OpenAI models support these"""
|
|
525
|
-
format_: NotRequired[CreatePromptFormat]
|
|
526
|
-
r"""Only supported on `image` models."""
|
|
527
|
-
dimensions: NotRequired[str]
|
|
528
|
-
r"""Only supported on `image` models."""
|
|
529
|
-
quality: NotRequired[str]
|
|
530
|
-
r"""Only supported on `image` models."""
|
|
531
|
-
style: NotRequired[str]
|
|
532
|
-
r"""Only supported on `image` models."""
|
|
533
|
-
response_format: NotRequired[Nullable[CreatePromptResponseFormatTypedDict]]
|
|
534
|
-
r"""An object specifying the format that the model must output.
|
|
614
|
+
return m
|
|
535
615
|
|
|
536
|
-
Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema
|
|
537
616
|
|
|
538
|
-
|
|
617
|
+
CreatePromptMessagesRole = Literal["system",]
|
|
618
|
+
r"""The role of the messages author, in this case `system`."""
|
|
539
619
|
|
|
540
|
-
Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
|
|
541
|
-
"""
|
|
542
|
-
photo_real_version: NotRequired[PhotoRealVersion]
|
|
543
|
-
r"""The version of photoReal to use. Must be v1 or v2. Only available for `leonardoai` provider"""
|
|
544
|
-
encoding_format: NotRequired[EncodingFormat]
|
|
545
|
-
r"""The format to return the embeddings"""
|
|
546
|
-
reasoning_effort: NotRequired[CreatePromptReasoningEffort]
|
|
547
|
-
r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
|
|
548
|
-
budget_tokens: NotRequired[float]
|
|
549
|
-
r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""
|
|
550
|
-
verbosity: NotRequired[Verbosity]
|
|
551
|
-
r"""Controls the verbosity of the model output."""
|
|
552
|
-
thinking_level: NotRequired[CreatePromptThinkingLevel]
|
|
553
|
-
r"""The level of thinking to use for the model. Only supported by `Google AI`"""
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
class ModelParameters(BaseModel):
|
|
557
|
-
r"""Optional model parameters like temperature and maxTokens."""
|
|
558
|
-
|
|
559
|
-
temperature: Optional[float] = None
|
|
560
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
561
|
-
|
|
562
|
-
max_tokens: Annotated[Optional[float], pydantic.Field(alias="maxTokens")] = None
|
|
563
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
564
|
-
|
|
565
|
-
top_k: Annotated[Optional[float], pydantic.Field(alias="topK")] = None
|
|
566
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
567
|
-
|
|
568
|
-
top_p: Annotated[Optional[float], pydantic.Field(alias="topP")] = None
|
|
569
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
570
|
-
|
|
571
|
-
frequency_penalty: Annotated[
|
|
572
|
-
Optional[float], pydantic.Field(alias="frequencyPenalty")
|
|
573
|
-
] = None
|
|
574
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
575
|
-
|
|
576
|
-
presence_penalty: Annotated[
|
|
577
|
-
Optional[float], pydantic.Field(alias="presencePenalty")
|
|
578
|
-
] = None
|
|
579
|
-
r"""Only supported on `chat` and `completion` models."""
|
|
580
|
-
|
|
581
|
-
num_images: Annotated[Optional[float], pydantic.Field(alias="numImages")] = None
|
|
582
|
-
r"""Only supported on `image` models."""
|
|
583
|
-
|
|
584
|
-
seed: Optional[float] = None
|
|
585
|
-
r"""Best effort deterministic seed for the model. Currently only OpenAI models support these"""
|
|
586
|
-
|
|
587
|
-
format_: Annotated[Optional[CreatePromptFormat], pydantic.Field(alias="format")] = (
|
|
588
|
-
None
|
|
589
|
-
)
|
|
590
|
-
r"""Only supported on `image` models."""
|
|
591
|
-
|
|
592
|
-
dimensions: Optional[str] = None
|
|
593
|
-
r"""Only supported on `image` models."""
|
|
594
620
|
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
621
|
+
CreatePromptMessagesContentTypedDict = TypeAliasType(
|
|
622
|
+
"CreatePromptMessagesContentTypedDict",
|
|
623
|
+
Union[str, List[TextContentPartSchemaTypedDict]],
|
|
624
|
+
)
|
|
625
|
+
r"""The contents of the system message."""
|
|
600
626
|
|
|
601
|
-
response_format: Annotated[
|
|
602
|
-
OptionalNullable[CreatePromptResponseFormat],
|
|
603
|
-
pydantic.Field(alias="responseFormat"),
|
|
604
|
-
] = UNSET
|
|
605
|
-
r"""An object specifying the format that the model must output.
|
|
606
627
|
|
|
607
|
-
|
|
628
|
+
CreatePromptMessagesContent = TypeAliasType(
|
|
629
|
+
"CreatePromptMessagesContent", Union[str, List[TextContentPartSchema]]
|
|
630
|
+
)
|
|
631
|
+
r"""The contents of the system message."""
|
|
608
632
|
|
|
609
|
-
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.
|
|
610
633
|
|
|
611
|
-
|
|
612
|
-
"""
|
|
634
|
+
class CreatePromptMessagesSystemMessageTypedDict(TypedDict):
|
|
635
|
+
r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
|
|
613
636
|
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
r"""The
|
|
637
|
+
role: CreatePromptMessagesRole
|
|
638
|
+
r"""The role of the messages author, in this case `system`."""
|
|
639
|
+
content: CreatePromptMessagesContentTypedDict
|
|
640
|
+
r"""The contents of the system message."""
|
|
641
|
+
name: NotRequired[str]
|
|
642
|
+
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
618
643
|
|
|
619
|
-
encoding_format: Optional[EncodingFormat] = None
|
|
620
|
-
r"""The format to return the embeddings"""
|
|
621
644
|
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
] = None
|
|
625
|
-
r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
|
|
645
|
+
class CreatePromptMessagesSystemMessage(BaseModel):
|
|
646
|
+
r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
|
|
626
647
|
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
)
|
|
630
|
-
r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""
|
|
648
|
+
role: CreatePromptMessagesRole
|
|
649
|
+
r"""The role of the messages author, in this case `system`."""
|
|
631
650
|
|
|
632
|
-
|
|
633
|
-
r"""
|
|
651
|
+
content: CreatePromptMessagesContent
|
|
652
|
+
r"""The contents of the system message."""
|
|
634
653
|
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
] = None
|
|
638
|
-
r"""The level of thinking to use for the model. Only supported by `Google AI`"""
|
|
654
|
+
name: Optional[str] = None
|
|
655
|
+
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
639
656
|
|
|
640
657
|
@model_serializer(mode="wrap")
|
|
641
658
|
def serialize_model(self, handler):
|
|
642
|
-
optional_fields = [
|
|
643
|
-
"temperature",
|
|
644
|
-
"maxTokens",
|
|
645
|
-
"topK",
|
|
646
|
-
"topP",
|
|
647
|
-
"frequencyPenalty",
|
|
648
|
-
"presencePenalty",
|
|
649
|
-
"numImages",
|
|
650
|
-
"seed",
|
|
651
|
-
"format",
|
|
652
|
-
"dimensions",
|
|
653
|
-
"quality",
|
|
654
|
-
"style",
|
|
655
|
-
"responseFormat",
|
|
656
|
-
"photoRealVersion",
|
|
657
|
-
"encoding_format",
|
|
658
|
-
"reasoningEffort",
|
|
659
|
-
"budgetTokens",
|
|
660
|
-
"verbosity",
|
|
661
|
-
"thinkingLevel",
|
|
662
|
-
]
|
|
663
|
-
nullable_fields = ["responseFormat"]
|
|
664
|
-
null_default_fields = []
|
|
665
|
-
|
|
659
|
+
optional_fields = set(["name"])
|
|
666
660
|
serialized = handler(self)
|
|
667
|
-
|
|
668
661
|
m = {}
|
|
669
662
|
|
|
670
663
|
for n, f in type(self).model_fields.items():
|
|
671
664
|
k = f.alias or n
|
|
672
665
|
val = serialized.get(k)
|
|
673
|
-
serialized.pop(k, None)
|
|
674
666
|
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
or k in null_default_fields
|
|
679
|
-
) # pylint: disable=no-member
|
|
680
|
-
|
|
681
|
-
if val is not None and val != UNSET_SENTINEL:
|
|
682
|
-
m[k] = val
|
|
683
|
-
elif val != UNSET_SENTINEL and (
|
|
684
|
-
not k in optional_fields or (optional_nullable and is_set)
|
|
685
|
-
):
|
|
686
|
-
m[k] = val
|
|
667
|
+
if val != UNSET_SENTINEL:
|
|
668
|
+
if val is not None or k not in optional_fields:
|
|
669
|
+
m[k] = val
|
|
687
670
|
|
|
688
671
|
return m
|
|
689
672
|
|
|
690
673
|
|
|
691
|
-
|
|
692
|
-
"
|
|
674
|
+
CreatePromptMessagesTypedDict = TypeAliasType(
|
|
675
|
+
"CreatePromptMessagesTypedDict",
|
|
676
|
+
Union[
|
|
677
|
+
CreatePromptMessagesSystemMessageTypedDict,
|
|
678
|
+
CreatePromptMessagesUserMessageTypedDict,
|
|
679
|
+
CreatePromptMessagesToolMessageTypedDict,
|
|
680
|
+
CreatePromptMessagesAssistantMessageTypedDict,
|
|
681
|
+
],
|
|
693
682
|
)
|
|
694
|
-
class PromptConfigurationTypedDict(TypedDict):
|
|
695
|
-
r"""[DEPRECATED]. Please use the `prompt` property instead. The current `prompt_config` will keep working but it will be deprecated in future versions. Configuration for the prompt including model and messages."""
|
|
696
683
|
|
|
697
|
-
messages: List[CreatePromptMessagesTypedDict]
|
|
698
|
-
r"""Array of messages that make up the conversation."""
|
|
699
|
-
model: NotRequired[str]
|
|
700
|
-
r"""Model ID used to generate the response, like `openai/gpt-4o` or `google/gemini-2.5-pro`. The full list of models can be found at https://docs.orq.ai/docs/ai-gateway-supported-models. Only chat models are supported."""
|
|
701
|
-
model_parameters: NotRequired[ModelParametersTypedDict]
|
|
702
|
-
r"""Optional model parameters like temperature and maxTokens."""
|
|
703
684
|
|
|
685
|
+
CreatePromptMessages = Annotated[
|
|
686
|
+
Union[
|
|
687
|
+
Annotated[CreatePromptMessagesSystemMessage, Tag("system")],
|
|
688
|
+
Annotated[CreatePromptMessagesUserMessage, Tag("user")],
|
|
689
|
+
Annotated[CreatePromptMessagesAssistantMessage, Tag("assistant")],
|
|
690
|
+
Annotated[CreatePromptMessagesToolMessage, Tag("tool")],
|
|
691
|
+
],
|
|
692
|
+
Discriminator(lambda m: get_discriminator(m, "role", "role")),
|
|
693
|
+
]
|
|
704
694
|
|
|
705
|
-
@deprecated(
|
|
706
|
-
"warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
|
|
707
|
-
)
|
|
708
|
-
class PromptConfiguration(BaseModel):
|
|
709
|
-
r"""[DEPRECATED]. Please use the `prompt` property instead. The current `prompt_config` will keep working but it will be deprecated in future versions. Configuration for the prompt including model and messages."""
|
|
710
695
|
|
|
711
|
-
|
|
712
|
-
|
|
696
|
+
CreatePromptVoice = Literal[
|
|
697
|
+
"alloy",
|
|
698
|
+
"echo",
|
|
699
|
+
"fable",
|
|
700
|
+
"onyx",
|
|
701
|
+
"nova",
|
|
702
|
+
"shimmer",
|
|
703
|
+
]
|
|
704
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
713
705
|
|
|
714
|
-
model: Optional[str] = None
|
|
715
|
-
r"""Model ID used to generate the response, like `openai/gpt-4o` or `google/gemini-2.5-pro`. The full list of models can be found at https://docs.orq.ai/docs/ai-gateway-supported-models. Only chat models are supported."""
|
|
716
706
|
|
|
717
|
-
|
|
718
|
-
|
|
707
|
+
CreatePromptFormat = Literal[
|
|
708
|
+
"wav",
|
|
709
|
+
"mp3",
|
|
710
|
+
"flac",
|
|
711
|
+
"opus",
|
|
712
|
+
"pcm16",
|
|
713
|
+
]
|
|
714
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
719
715
|
|
|
720
716
|
|
|
721
|
-
|
|
722
|
-
r"""
|
|
717
|
+
class CreatePromptAudioTypedDict(TypedDict):
|
|
718
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
723
719
|
|
|
720
|
+
voice: CreatePromptVoice
|
|
721
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
722
|
+
format_: CreatePromptFormat
|
|
723
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
724
724
|
|
|
725
|
-
CreatePromptContentPromptsRequestRequestBody2TypedDict = TextContentPartSchemaTypedDict
|
|
726
725
|
|
|
726
|
+
class CreatePromptAudio(BaseModel):
|
|
727
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
727
728
|
|
|
728
|
-
|
|
729
|
+
voice: CreatePromptVoice
|
|
730
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
729
731
|
|
|
732
|
+
     format_: Annotated[CreatePromptFormat, pydantic.Field(alias="format")]
+    r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
 
-CreatePromptMessagesPromptsRequestRequestBodyContentTypedDict = TypeAliasType(
-    "CreatePromptMessagesPromptsRequestRequestBodyContentTypedDict",
-    Union[str, List[CreatePromptContentPromptsRequestRequestBody2TypedDict]],
-)
-r"""The contents of the tool message."""
-
-CreatePromptMessagesPromptsRequestRequestBodyContent = TypeAliasType(
-    "CreatePromptMessagesPromptsRequestRequestBodyContent",
-    Union[str, List[CreatePromptContentPromptsRequestRequestBody2]],
-)
-r"""The contents of the tool message."""
-
-CreatePromptMessagesPromptsType = Literal["ephemeral",]
-r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
-
-    "1h",
-]
-r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
-"""
-
-    ttl: NotRequired[CreatePromptMessagesTTL]
-    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
-    """
-
-    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
-
-    role: CreatePromptMessagesPromptsRequestRequestBodyRole
-    r"""The role of the messages author, in this case tool."""
-    content: CreatePromptMessagesPromptsRequestRequestBodyContentTypedDict
-    r"""The contents of the tool message."""
-    tool_call_id: Nullable[str]
-    r"""Tool call that this message is responding to."""
-    cache_control: NotRequired[CreatePromptMessagesCacheControlTypedDict]
-
-    role: CreatePromptMessagesPromptsRequestRequestBodyRole
-    r"""The role of the messages author, in this case tool."""
-    content: CreatePromptMessagesPromptsRequestRequestBodyContent
-    r"""The contents of the tool message."""
-
-    def serialize_model(self, handler):
-        optional_fields = ["cache_control"]
-        nullable_fields = ["tool_call_id"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-            val = serialized.get(k)
-            serialized.pop(k, None)
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-    Union[
-        RefusalPartSchemaTypedDict,
-        RedactedReasoningPartSchemaTypedDict,
-        TextContentPartSchemaTypedDict,
-        ReasoningPartSchemaTypedDict,
-    ],
-)
-
-    Union[
-        Annotated[TextContentPartSchema, Tag("text")],
-        Annotated[RefusalPartSchema, Tag("refusal")],
-        Annotated[ReasoningPartSchema, Tag("reasoning")],
-        Annotated[RedactedReasoningPartSchema, Tag("redacted_reasoning")],
-    ],
-    Discriminator(lambda m: get_discriminator(m, "type", "type")),
-]
 
+CreatePromptResponseFormatPromptsRequestType = Literal["json_schema",]
 
+class CreatePromptResponseFormatPromptsJSONSchemaTypedDict(TypedDict):
+    name: str
+    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
+    description: NotRequired[str]
+    r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
+    schema_: NotRequired[Any]
+    r"""The schema for the response format, described as a JSON Schema object."""
+    strict: NotRequired[bool]
+    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
 
+class CreatePromptResponseFormatPromptsJSONSchema(BaseModel):
+    name: str
+    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
+
+    description: Optional[str] = None
+    r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
+
+    schema_: Annotated[Optional[Any], pydantic.Field(alias="schema")] = None
+    r"""The schema for the response format, described as a JSON Schema object."""
+
+    strict: Optional[bool] = False
+    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
 
+class CreatePromptResponseFormatJSONSchemaTypedDict(TypedDict):
+    r"""
+
+    JSON Schema response format. Used to generate structured JSON responses
+    """
+
+    type: CreatePromptResponseFormatPromptsRequestType
+    json_schema: CreatePromptResponseFormatPromptsJSONSchemaTypedDict
 
+class CreatePromptResponseFormatJSONSchema(BaseModel):
+    r"""
+
+    JSON Schema response format. Used to generate structured JSON responses
+    """
+
+    type: CreatePromptResponseFormatPromptsRequestType
+
+    json_schema: CreatePromptResponseFormatPromptsJSONSchema
 
+CreatePromptResponseFormatPromptsType = Literal["json_object",]
 
+class CreatePromptResponseFormatJSONObjectTypedDict(TypedDict):
+    r"""
+
+    JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
+    """
+
+    type: CreatePromptResponseFormatPromptsType
 
+class CreatePromptResponseFormatJSONObject(BaseModel):
+    r"""
+
+    JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
+    """
+
+    type: CreatePromptResponseFormatPromptsType
 
+CreatePromptResponseFormatType = Literal["text",]
 
+class CreatePromptResponseFormatTextTypedDict(TypedDict):
+    r"""
+
+    Default response format. Used to generate text responses
+    """
+
+    type: CreatePromptResponseFormatType
 
+class CreatePromptResponseFormatText(BaseModel):
+    r"""
+
+    Default response format. Used to generate text responses
+    """
+
+    type: CreatePromptResponseFormatType
 
+CreatePromptResponseFormatTypedDict = TypeAliasType(
+    "CreatePromptResponseFormatTypedDict",
+    Union[
+        CreatePromptResponseFormatTextTypedDict,
+        CreatePromptResponseFormatJSONObjectTypedDict,
+        CreatePromptResponseFormatJSONSchemaTypedDict,
+    ],
 )
-r"""
+r"""An object specifying the format that the model must output"""
 
+CreatePromptResponseFormat = Annotated[
+    Union[
+        Annotated[CreatePromptResponseFormatText, Tag("text")],
+        Annotated[CreatePromptResponseFormatJSONObject, Tag("json_object")],
+        Annotated[CreatePromptResponseFormatJSONSchema, Tag("json_schema")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+r"""An object specifying the format that the model must output"""
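Read together, these additions describe an OpenAI-style `response_format` object with three variants selected by `type` (`text`, `json_object`, `json_schema`). The sketch below shows the kind of payload the `json_schema` variant's TypedDicts describe; it is an illustration assembled from the field names above, with an invented schema, not code from this package:

    # Hypothetical payload matching CreatePromptResponseFormatJSONSchemaTypedDict.
    # "type" is the discriminator; "schema" carries a standard JSON Schema object
    # and "strict" asks the model to follow that schema exactly.
    response_format = {
        "type": "json_schema",
        "json_schema": {
            "name": "ticket_summary",  # a-z, A-Z, 0-9, _ and -, max 64 chars
            "description": "Summary of a support ticket",
            "schema": {
                "type": "object",
                "properties": {"title": {"type": "string"}},
                "required": ["title"],
            },
            "strict": True,
        },
    }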
+CreatePromptReasoningEffort = Literal[
+    "none",
+    "minimal",
+    "low",
+    "medium",
+    "high",
+    "xhigh",
+]
+r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+- All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+- `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+"""
 
-class CreatePromptMessagesAudio(BaseModel):
-    r"""Data about a previous audio response from the model."""
 
+CreatePromptStopTypedDict = TypeAliasType(
+    "CreatePromptStopTypedDict", Union[str, List[str]]
+)
+r"""Up to 4 sequences where the API will stop generating further tokens."""
 
+CreatePromptStop = TypeAliasType("CreatePromptStop", Union[str, List[str]])
+r"""Up to 4 sequences where the API will stop generating further tokens."""
 
-    r"""The name of the function to call."""
-    arguments: NotRequired[str]
-    r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
-
-class CreatePromptMessagesFunction(BaseModel):
-    name: Optional[str] = None
-    r"""The name of the function to call."""
 
+class CreatePromptStreamOptionsTypedDict(TypedDict):
+    r"""Options for streaming response. Only set this when you set stream: true."""
+
+    include_usage: NotRequired[bool]
+    r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
 
+class CreatePromptStreamOptions(BaseModel):
+    r"""Options for streaming response. Only set this when you set stream: true."""
+
+    include_usage: Optional[bool] = None
+    r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
 
-    function: CreatePromptMessagesFunctionTypedDict
-    thought_signature: NotRequired[str]
-    r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
-    r"""The type of the tool. Currently, only `function` is supported."""
-    function: CreatePromptMessagesFunction
-
-    name: NotRequired[str]
-    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
-    audio: NotRequired[Nullable[CreatePromptMessagesAudioTypedDict]]
-    r"""Data about a previous audio response from the model."""
-    tool_calls: NotRequired[List[CreatePromptMessagesToolCallsTypedDict]]
-    r"""The tool calls generated by the model, such as function calls."""
-
-    r"""The role of the messages author, in this case `assistant`."""
-    content: OptionalNullable[CreatePromptMessagesPromptsRequestContent] = UNSET
-    r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
-    name: Optional[str] = None
-    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
 
+CreatePromptThinkingTypedDict = TypeAliasType(
+    "CreatePromptThinkingTypedDict",
+    Union[ThinkingConfigDisabledSchemaTypedDict, ThinkingConfigEnabledSchemaTypedDict],
+)
 
+CreatePromptThinking = Annotated[
+    Union[
+        Annotated[ThinkingConfigDisabledSchema, Tag("disabled")],
+        Annotated[ThinkingConfigEnabledSchema, Tag("enabled")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
 
+CreatePromptToolChoiceType = Literal["function",]
+r"""The type of the tool. Currently, only function is supported."""
 
+class CreatePromptToolChoiceFunctionTypedDict(TypedDict):
+    name: str
+    r"""The name of the function to call."""
 
+class CreatePromptToolChoiceFunction(BaseModel):
+    name: str
+    r"""The name of the function to call."""
 
+class CreatePromptToolChoice2TypedDict(TypedDict):
+    function: CreatePromptToolChoiceFunctionTypedDict
+    type: NotRequired[CreatePromptToolChoiceType]
+    r"""The type of the tool. Currently, only function is supported."""
 
+class CreatePromptToolChoice2(BaseModel):
+    function: CreatePromptToolChoiceFunction
+
+    type: Optional[CreatePromptToolChoiceType] = None
+    r"""The type of the tool. Currently, only function is supported."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["
-        nullable_fields = ["content", "refusal", "audio"]
-        null_default_fields = []
+        optional_fields = set(["type"])
         serialized = handler(self)
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
 
-            if val
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
         return m
 
+CreatePromptToolChoice1 = Literal[
+    "none",
+    "auto",
+    "required",
+]
 
+CreatePromptToolChoiceTypedDict = TypeAliasType(
+    "CreatePromptToolChoiceTypedDict",
+    Union[CreatePromptToolChoice2TypedDict, CreatePromptToolChoice1],
+)
+r"""Controls which (if any) tool is called by the model."""
 
+CreatePromptToolChoice = TypeAliasType(
+    "CreatePromptToolChoice", Union[CreatePromptToolChoice2, CreatePromptToolChoice1]
+)
+r"""Controls which (if any) tool is called by the model."""
 
+CreatePromptModalities = Literal[
+    "text",
+    "audio",
 ]
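These are plain request-level knobs. A hedged sketch of what the corresponding payload fragments might look like, using only field names and literal values taken from the models above (the surrounding request is not shown here):

    # Hypothetical request fragments built from the TypedDicts above.
    # tool_choice accepts a literal ("none" | "auto" | "required") or the
    # object form that pins a specific function by name.
    tool_choice = {"type": "function", "function": {"name": "lookup_order"}}

    stream_options = {"include_usage": True}  # final usage chunk when streaming
    reasoning_effort = "low"                  # none/minimal/low/medium/high/xhigh
    stop = ["\n\n", "END"]                    # up to 4 stop sequences
    modalities = ["text"]                     # or ["text", "audio"] for audio-capable models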
-r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
-- `5m`: 5 minutes
-- `1h`: 1 hour
-"""
-
-    ttl: NotRequired[CreatePrompt2TTL]
-    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
-    - `5m`: 5 minutes
-    - `1h`: 1 hour
-    """
-
-    ttl: Optional[CreatePrompt2TTL] = "5m"
-    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
-    Defaults to `5m`. Only supported by `Anthropic` Claude models.
-    """
-
-    r"""The type of the content part. Always `file`."""
-    file: FileContentPartSchemaTypedDict
-    r"""File data for the content part. Must contain either file_data or uri, but not both."""
-    cache_control: NotRequired[CreatePrompt2CacheControlTypedDict]
-
-    file: FileContentPartSchema
-    r"""File data for the content part. Must contain either file_data or uri, but not both."""
-
-    Union[
-        AudioContentPartSchemaTypedDict,
-        TextContentPartSchemaTypedDict,
-        ImageContentPartSchemaTypedDict,
-        CreatePrompt24TypedDict,
-    ],
-)
-
-CreatePromptContentPrompts2 = Annotated[
-    Union[
-        Annotated[TextContentPartSchema, Tag("text")],
-        Annotated[ImageContentPartSchema, Tag("image_url")],
-        Annotated[AudioContentPartSchema, Tag("input_audio")],
-        Annotated[CreatePrompt24, Tag("file")],
-    ],
-    Discriminator(lambda m: get_discriminator(m, "type", "type")),
-]
-
-    Union[str, List[CreatePromptContentPrompts2TypedDict]],
-)
-r"""The contents of the user message."""
-
-    content: CreatePromptMessagesPromptsContentTypedDict
-    r"""The contents of the user message."""
-    name: NotRequired[str]
-    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
 
+CreatePromptID1 = Literal[
+    "orq_pii_detection",
+    "orq_sexual_moderation",
+    "orq_harmful_moderation",
+]
+r"""The key of the guardrail."""
 
+CreatePromptIDTypedDict = TypeAliasType(
+    "CreatePromptIDTypedDict", Union[CreatePromptID1, str]
+)
 
+CreatePromptID = TypeAliasType("CreatePromptID", Union[CreatePromptID1, str])
 
+CreatePromptExecuteOn = Literal[
+    "input",
+    "output",
+]
+r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
+class CreatePromptGuardrailsTypedDict(TypedDict):
+    id: CreatePromptIDTypedDict
+    execute_on: CreatePromptExecuteOn
+    r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
+class CreatePromptGuardrails(BaseModel):
+    id: CreatePromptID
+
+    execute_on: CreatePromptExecuteOn
+    r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
+class CreatePromptFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
 
+class CreatePromptFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
 
+class CreatePromptRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
 
+class CreatePromptRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
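The guardrail, fallback, and retry models compose into a reliability block on the prompt configuration. A minimal sketch of such a block, using only keys and literals defined above (the custom guardrail key and the specific status codes are illustrative assumptions):

    # Hypothetical reliability configuration mirroring the new TypedDicts.
    guardrails = [
        {"id": "orq_pii_detection", "execute_on": "input"},     # built-in guardrail key
        {"id": "my-custom-guardrail", "execute_on": "output"},  # custom keys are plain strings
    ]
    fallbacks = [{"model": "openai/gpt-4o-mini"}]               # tried if the primary model fails
    retry = {"count": 3, "on_codes": [429, 500, 502, 503]}      # 1-5 attempts on these statuses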
-class CreatePromptMessagesUserMessage(BaseModel):
-    role: CreatePromptMessagesPromptsRole
-    r"""The role of the messages author, in this case `user`."""
-    r"""The contents of the user message."""
-    name: Optional[str] = None
-    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
-
-    Union[str, List[TextContentPartSchemaTypedDict]],
-)
-r"""The contents of the system message."""
-
-    r"""The contents of the system message."""
-    name: NotRequired[str]
-    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
-
-class CreatePromptMessagesSystemMessage(BaseModel):
-    r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
-
-    role: CreatePromptMessagesRole
-    r"""The role of the messages author, in this case `system`."""
-    r"""The contents of the system message."""
-    name: Optional[str] = None
-    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
-
-CreatePromptPromptsMessagesTypedDict = TypeAliasType(
-    "CreatePromptPromptsMessagesTypedDict",
-    Union[
-        CreatePromptMessagesSystemMessageTypedDict,
-        CreatePromptMessagesUserMessageTypedDict,
-        CreatePromptMessagesToolMessageTypedDict,
-        CreatePromptMessagesAssistantMessageTypedDict,
-    ],
-)
-
-        Annotated[CreatePromptMessagesSystemMessage, Tag("system")],
-        Annotated[CreatePromptMessagesUserMessage, Tag("user")],
-        Annotated[CreatePromptMessagesAssistantMessage, Tag("assistant")],
-        Annotated[CreatePromptMessagesToolMessage, Tag("tool")],
-    ],
-    Discriminator(lambda m: get_discriminator(m, "role", "role")),
-]
-
-    name: str
-    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
-    description: NotRequired[str]
-    r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
-    schema_: NotRequired[Any]
-    r"""The schema for the response format, described as a JSON Schema object."""
-    strict: NotRequired[bool]
-    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
-
-    description: Optional[str] = None
-    r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
-    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
-
-    JSON Schema response format. Used to generate structured JSON responses
-    """
-
-    type: CreatePromptResponseFormatPromptsRequestRequestBodyPrompt3Type
-
-CreatePromptResponseFormatPromptsRequestRequestBodyPromptType = Literal["json_object",]
 
+CreatePromptType = Literal["exact_match",]
 
+class CreatePromptCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: CreatePromptType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
 
+class CreatePromptCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: CreatePromptType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
 
+CreatePromptLoadBalancerType = Literal["weight_based",]
 
+class CreatePromptLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
 
+class CreatePromptLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
 
+class CreatePromptLoadBalancer1TypedDict(TypedDict):
+    type: CreatePromptLoadBalancerType
+    models: List[CreatePromptLoadBalancerModelsTypedDict]
 
+class CreatePromptLoadBalancer1(BaseModel):
+    type: CreatePromptLoadBalancerType
+
+    models: List[CreatePromptLoadBalancerModels]
 
+CreatePromptLoadBalancerTypedDict = CreatePromptLoadBalancer1TypedDict
+r"""Load balancer configuration for the request."""
 
+CreatePromptLoadBalancer = CreatePromptLoadBalancer1
+r"""Load balancer configuration for the request."""
 
+class CreatePromptTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
 
+class CreatePromptTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
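Cache, load balancer, and timeout round out the routing options. A sketch of the dict shapes these TypedDicts accept, assembled from the field names and defaults above (the concrete model IDs and numbers are illustrative assumptions):

    # Hypothetical routing configuration based on the models above.
    cache = {"type": "exact_match", "ttl": 3600}   # seconds, max 259200 (3 days)
    load_balancer = {
        "type": "weight_based",
        "models": [
            {"model": "openai/gpt-4o", "weight": 0.7},
            {"model": "anthropic/claude-3-5-sonnet-20241022", "weight": 0.3},
        ],
    }
    timeout = {"call_timeout": 30000}              # milliseconds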
-CreatePromptResponseFormatPromptsRequestRequestBodyPromptType = Literal["json_object",]
-
-    Union[
-        CreatePromptResponseFormatTextTypedDict,
-        CreatePromptResponseFormatJSONObjectTypedDict,
-        CreatePromptResponseFormatPromptsJSONSchemaTypedDict,
-    ],
-)
-r"""An object specifying the format that the model must output"""
-
-        Annotated[CreatePromptResponseFormatText, Tag("text")],
-        Annotated[CreatePromptResponseFormatJSONObject, Tag("json_object")],
-        Annotated[CreatePromptResponseFormatPromptsJSONSchema, Tag("json_schema")],
-    ],
-    Discriminator(lambda m: get_discriminator(m, "type", "type")),
-]
-r"""An object specifying the format that the model must output"""
-
-    temperature: NotRequired[Nullable[float]]
-    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
-    max_tokens: NotRequired[Nullable[int]]
-    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
-    """
-    response_format: NotRequired[CreatePromptPromptsResponseFormatTypedDict]
-    r"""An object specifying the format that the model must output"""
 
+class PromptInputTypedDict(TypedDict):
+    r"""Prompt configuration with model and messages."""
+
+    messages: List[CreatePromptMessagesTypedDict]
+    r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+    model: NotRequired[str]
+    r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`. The full list of models can be found at https://docs.orq.ai/docs/ai-gateway-supported-models. Only chat models are supported."""
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+    audio: NotRequired[Nullable[CreatePromptAudioTypedDict]]
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+    frequency_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+    max_tokens: NotRequired[Nullable[int]]
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+    max_completion_tokens: NotRequired[Nullable[int]]
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+    logprobs: NotRequired[Nullable[bool]]
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+    top_logprobs: NotRequired[Nullable[int]]
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+    n: NotRequired[Nullable[int]]
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+    presence_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+    response_format: NotRequired[CreatePromptResponseFormatTypedDict]
+    r"""An object specifying the format that the model must output"""
+    reasoning_effort: NotRequired[CreatePromptReasoningEffort]
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+    verbosity: NotRequired[str]
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+    seed: NotRequired[Nullable[float]]
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+    stop: NotRequired[Nullable[CreatePromptStopTypedDict]]
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+    stream_options: NotRequired[Nullable[CreatePromptStreamOptionsTypedDict]]
+    r"""Options for streaming response. Only set this when you set stream: true."""
+    thinking: NotRequired[CreatePromptThinkingTypedDict]
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+    top_p: NotRequired[Nullable[float]]
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+    top_k: NotRequired[Nullable[float]]
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+    tool_choice: NotRequired[CreatePromptToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model."""
+    parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use."""
+    modalities: NotRequired[Nullable[List[CreatePromptModalities]]]
+    r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+    guardrails: NotRequired[List[CreatePromptGuardrailsTypedDict]]
+    r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[CreatePromptFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[CreatePromptRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[CreatePromptCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[CreatePromptLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[CreatePromptTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
+class PromptInput(BaseModel):
+    r"""Prompt configuration with model and messages."""
+
+    messages: List[CreatePromptMessages]
+    r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+
+    model: Optional[str] = None
+    r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`. The full list of models can be found at https://docs.orq.ai/docs/ai-gateway-supported-models. Only chat models are supported."""
+
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
+    audio: OptionalNullable[CreatePromptAudio] = UNSET
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+
+    frequency_penalty: OptionalNullable[float] = UNSET
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+
+    max_tokens: OptionalNullable[int] = UNSET
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+
+    max_completion_tokens: OptionalNullable[int] = UNSET
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+
+    logprobs: OptionalNullable[bool] = UNSET
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+
+    top_logprobs: OptionalNullable[int] = UNSET
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+
+    n: OptionalNullable[int] = UNSET
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+
+    presence_penalty: OptionalNullable[float] = UNSET
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+
+    response_format: Optional[CreatePromptResponseFormat] = None
+    r"""An object specifying the format that the model must output"""
+
+    reasoning_effort: Optional[CreatePromptReasoningEffort] = None
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+
+    verbosity: Optional[str] = None
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+
+    seed: OptionalNullable[float] = UNSET
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+
+    stop: OptionalNullable[CreatePromptStop] = UNSET
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+
+    stream_options: OptionalNullable[CreatePromptStreamOptions] = UNSET
+    r"""Options for streaming response. Only set this when you set stream: true."""
+
+    thinking: Optional[CreatePromptThinking] = None
 
     temperature: OptionalNullable[float] = UNSET
     r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
 
+    top_p: OptionalNullable[float] = UNSET
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+
+    top_k: OptionalNullable[float] = UNSET
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+
+    tool_choice: Optional[CreatePromptToolChoice] = None
+    r"""Controls which (if any) tool is called by the model."""
+
+    parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use."""
+
+    modalities: OptionalNullable[List[CreatePromptModalities]] = UNSET
+    r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+
+    guardrails: Optional[List[CreatePromptGuardrails]] = None
+    r"""A list of guardrails to apply to the request."""
+
+    fallbacks: Optional[List[CreatePromptFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[CreatePromptRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[CreatePromptCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[CreatePromptLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[CreatePromptTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
+        optional_fields = set(
+            [
+                "model",
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
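`PromptInput` is the new request-level prompt configuration that the create-prompt body points at. A hedged sketch of a request body using it, built only from fields shown in this diff (`description`, `prompt`, and the nested `PromptInputTypedDict` keys); how the body is submitted (client class and method) is not part of this file and is left out, and other fields of the request body may be required in practice:

    # Hypothetical create-prompt request body following PromptInputTypedDict.
    create_prompt_body = {
        "description": "Summarises support tickets",
        "prompt": {
            "model": "openai/gpt-4o",
            "messages": [
                {"role": "system", "content": "You summarise support tickets."},
                {"role": "user", "content": "{{ticket_body}}"},
            ],
            "response_format": {"type": "text"},
            "temperature": 0.2,
            "guardrails": [{"id": "orq_pii_detection", "execute_on": "input"}],
            "retry": {"count": 2, "on_codes": [429]},
            "fallbacks": [{"model": "openai/gpt-4o-mini"}],
        },
    }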
@@ -1384,10 +1456,8 @@ class CreatePromptRequestBodyTypedDict(TypedDict):
     description: NotRequired[Nullable[str]]
     r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
     metadata: NotRequired[CreatePromptMetadataTypedDict]
-    prompt_config: NotRequired[PromptConfigurationTypedDict]
-    r"""[DEPRECATED]. Please use the `prompt` property instead. The current `prompt_config` will keep working but it will be deprecated in future versions. Configuration for the prompt including model and messages."""
     prompt: NotRequired[PromptInputTypedDict]
-    r"""Prompt configuration with model and messages.
+    r"""Prompt configuration with model and messages."""
 
 
 class CreatePromptRequestBody(BaseModel):
@@ -1407,44 +1477,31 @@ class CreatePromptRequestBody(BaseModel):
 
     metadata: Optional[CreatePromptMetadata] = None
 
-    prompt_config: Annotated[
-        Optional[PromptConfiguration],
-        pydantic.Field(
-            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
-        ),
-    ] = None
-    r"""[DEPRECATED]. Please use the `prompt` property instead. The current `prompt_config` will keep working but it will be deprecated in future versions. Configuration for the prompt including model and messages."""
-
     prompt: Optional[PromptInput] = None
-    r"""Prompt configuration with model and messages.
+    r"""Prompt configuration with model and messages."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["description", "metadata", "
-        nullable_fields = ["description"]
-        null_default_fields = []
-
+        optional_fields = set(["description", "metadata", "prompt"])
+        nullable_fields = set(["description"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
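The rewritten serializer drops unset optional fields but still emits a nullable field that the caller explicitly set to None. A standalone sketch of that rule in isolation; the helper below is local to this example and is not SDK code (the real method also skips the UNSET sentinel first):

    # Illustration of the new serialization rule for a single field.
    def keep_field(key, value, explicitly_set, optional_fields, nullable_fields):
        # Mirrors the is_nullable_and_explicitly_set check in serialize_model.
        is_nullable_and_explicitly_set = key in nullable_fields and explicitly_set
        return (
            value is not None
            or key not in optional_fields
            or is_nullable_and_explicitly_set
        )

    # "description" is optional and nullable: omitted when never set,
    # but serialized as null when the caller explicitly passes None.
    assert keep_field("description", None, False, {"description"}, {"description"}) is False
    assert keep_field("description", None, True, {"description"}, {"description"}) is True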
@@ -1452,7 +1509,7 @@ class CreatePromptRequestBody(BaseModel):
 CreatePromptPromptsType = Literal["prompt",]
 
 
-CreatePromptModelType = Literal[
+ModelType = Literal[
     "chat",
     "completion",
     "embedding",
@@ -1460,6 +1517,7 @@ CreatePromptModelType = Literal[
     "tts",
     "stt",
     "rerank",
+    "ocr",
     "moderation",
     "vision",
 ]
@@ -1500,39 +1558,47 @@ CreatePromptResponseFormat4 = Literal[
 ]
 
 
+CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyPromptConfigModelParametersType = Literal[
+    "text",
+]
 
 
 class CreatePromptResponseFormat3TypedDict(TypedDict):
-    type:
+    type: CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyPromptConfigModelParametersType
 
 
 class CreatePromptResponseFormat3(BaseModel):
-    type:
+    type: CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyPromptConfigModelParametersType
 
 
+CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyPromptConfigType = Literal[
+    "json_object",
+]
 
 
 class CreatePromptResponseFormat2TypedDict(TypedDict):
-    type:
+    type: CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyPromptConfigType
 
 
 class CreatePromptResponseFormat2(BaseModel):
-    type:
+    type: CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyPromptConfigType
 
 
+CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyType = Literal[
+    "json_schema",
+]
 
 
-class
+class CreatePromptResponseFormatPromptsResponse200ApplicationJSONJSONSchemaTypedDict(
+    TypedDict
+):
     name: str
     schema_: Dict[str, Any]
     description: NotRequired[str]
     strict: NotRequired[bool]
 
 
-class
+class CreatePromptResponseFormatPromptsResponse200ApplicationJSONJSONSchema(BaseModel):
     name: str
 
     schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
@@ -1541,20 +1607,54 @@ class CreatePromptResponseFormatPromptsResponseJSONSchema(BaseModel):
 
     strict: Optional[bool] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CreatePromptResponseFormat1TypedDict(TypedDict):
-    type:
-    json_schema:
+    type: CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
+    json_schema: (
+        CreatePromptResponseFormatPromptsResponse200ApplicationJSONJSONSchemaTypedDict
+    )
     display_name: NotRequired[str]
 
 
 class CreatePromptResponseFormat1(BaseModel):
-    type:
+    type: CreatePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
 
-    json_schema:
+    json_schema: CreatePromptResponseFormatPromptsResponse200ApplicationJSONJSONSchema
 
     display_name: Optional[str] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 CreatePromptPromptsResponseResponseFormatTypedDict = TypeAliasType(
     "CreatePromptPromptsResponseResponseFormatTypedDict",
@@ -1612,7 +1712,7 @@ CreatePromptEncodingFormat = Literal[
 r"""The format to return the embeddings"""


-
+CreatePromptPromptsResponseReasoningEffort = Literal[
     "none",
     "disable",
     "minimal",
@@ -1631,14 +1731,14 @@ CreatePromptVerbosity = Literal[
 r"""Controls the verbosity of the model output."""


-
+CreatePromptThinkingLevel = Literal[
     "low",
     "high",
 ]
 r"""The level of thinking to use for the model. Only supported by `Google AI`"""


-class
+class ModelParametersTypedDict(TypedDict):
     r"""Model Parameters: Not all parameters apply to every model"""

     temperature: NotRequired[float]
@@ -1680,17 +1780,17 @@ class CreatePromptModelParametersTypedDict(TypedDict):
     r"""The version of photoReal to use. Must be v1 or v2. Only available for `leonardoai` provider"""
     encoding_format: NotRequired[CreatePromptEncodingFormat]
     r"""The format to return the embeddings"""
-    reasoning_effort: NotRequired[
+    reasoning_effort: NotRequired[CreatePromptPromptsResponseReasoningEffort]
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
     budget_tokens: NotRequired[float]
     r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""
     verbosity: NotRequired[CreatePromptVerbosity]
     r"""Controls the verbosity of the model output."""
-    thinking_level: NotRequired[
+    thinking_level: NotRequired[CreatePromptThinkingLevel]
     r"""The level of thinking to use for the model. Only supported by `Google AI`"""


-class
+class ModelParameters(BaseModel):
     r"""Model Parameters: Not all parameters apply to every model"""

     temperature: Optional[float] = None
@@ -1757,7 +1857,7 @@ class CreatePromptModelParameters(BaseModel):
     r"""The format to return the embeddings"""

     reasoning_effort: Annotated[
-        Optional[
+        Optional[CreatePromptPromptsResponseReasoningEffort],
         pydantic.Field(alias="reasoningEffort"),
     ] = None
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
@@ -1767,403 +1867,1803 @@ class CreatePromptModelParameters(BaseModel):
     )
     r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""

-    verbosity: Optional[CreatePromptVerbosity] = None
-    r"""Controls the verbosity of the model output."""
+    verbosity: Optional[CreatePromptVerbosity] = None
+    r"""Controls the verbosity of the model output."""
+
+    thinking_level: Annotated[
+        Optional[CreatePromptThinkingLevel], pydantic.Field(alias="thinkingLevel")
+    ] = None
+    r"""The level of thinking to use for the model. Only supported by `Google AI`"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "temperature",
+                "maxTokens",
+                "topK",
+                "topP",
+                "frequencyPenalty",
+                "presencePenalty",
+                "numImages",
+                "seed",
+                "format",
+                "dimensions",
+                "quality",
+                "style",
+                "responseFormat",
+                "photoRealVersion",
+                "encoding_format",
+                "reasoningEffort",
+                "budgetTokens",
+                "verbosity",
+                "thinkingLevel",
+            ]
+        )
+        nullable_fields = set(["responseFormat"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+CreatePromptProvider = Literal[
+    "openai",
+    "groq",
+    "cohere",
+    "azure",
+    "aws",
+    "google",
+    "google-ai",
+    "huggingface",
+    "togetherai",
+    "perplexity",
+    "anthropic",
+    "leonardoai",
+    "fal",
+    "nvidia",
+    "jina",
+    "elevenlabs",
+    "litellm",
+    "cerebras",
+    "openailike",
+    "bytedance",
+    "mistral",
+    "deepseek",
+    "contextualai",
+    "moonshotai",
+    "zai",
+    "slack",
+]
+
+
|
+
CreatePromptRole = Literal[
|
|
1957
|
+
"system",
|
|
1958
|
+
"assistant",
|
|
1959
|
+
"user",
|
|
1960
|
+
"exception",
|
|
1961
|
+
"tool",
|
|
1962
|
+
"prompt",
|
|
1963
|
+
"correction",
|
|
1964
|
+
"expected_output",
|
|
1965
|
+
]
|
|
1966
|
+
r"""The role of the prompt message"""
|
|
1967
|
+
|
|
1968
|
+
|
|
1969
|
+
CreatePrompt2PromptsResponse200ApplicationJSONType = Literal["file",]
|
|
1970
|
+
r"""The type of the content part. Always `file`."""
|
|
1971
|
+
|
|
1972
|
+
|
|
1973
|
+
class CreatePrompt2FileTypedDict(TypedDict):
|
|
1974
|
+
file_data: NotRequired[str]
|
|
1975
|
+
r"""The file data as a data URI string in the format 'data:<mime-type>;base64,<base64-encoded-data>'. Example: 'data:image/png;base64,iVBORw0KGgoAAAANS...'"""
|
|
1976
|
+
uri: NotRequired[str]
|
|
1977
|
+
r"""URL to the file. Only supported by Anthropic Claude models for PDF files."""
|
|
1978
|
+
mime_type: NotRequired[str]
|
|
1979
|
+
r"""MIME type of the file (e.g., application/pdf, image/png)"""
|
|
1980
|
+
filename: NotRequired[str]
|
|
1981
|
+
r"""The name of the file, used when passing the file to the model as a string."""
|
|
1982
|
+
|
|
1983
|
+
|
|
1984
|
+
class CreatePrompt2File(BaseModel):
|
|
1985
|
+
file_data: Optional[str] = None
|
|
1986
|
+
r"""The file data as a data URI string in the format 'data:<mime-type>;base64,<base64-encoded-data>'. Example: 'data:image/png;base64,iVBORw0KGgoAAAANS...'"""
|
|
1987
|
+
|
|
1988
|
+
uri: Optional[str] = None
|
|
1989
|
+
r"""URL to the file. Only supported by Anthropic Claude models for PDF files."""
|
|
1990
|
+
|
|
1991
|
+
mime_type: Annotated[Optional[str], pydantic.Field(alias="mimeType")] = None
|
|
1992
|
+
r"""MIME type of the file (e.g., application/pdf, image/png)"""
|
|
1993
|
+
|
|
1994
|
+
filename: Optional[str] = None
|
|
1995
|
+
r"""The name of the file, used when passing the file to the model as a string."""
|
|
1996
|
+
|
|
1997
|
+
@model_serializer(mode="wrap")
|
|
1998
|
+
def serialize_model(self, handler):
|
|
1999
|
+
optional_fields = set(["file_data", "uri", "mimeType", "filename"])
|
|
2000
|
+
serialized = handler(self)
|
|
2001
|
+
m = {}
|
|
2002
|
+
|
|
2003
|
+
for n, f in type(self).model_fields.items():
|
|
2004
|
+
k = f.alias or n
|
|
2005
|
+
val = serialized.get(k)
|
|
2006
|
+
|
|
2007
|
+
if val != UNSET_SENTINEL:
|
|
2008
|
+
if val is not None or k not in optional_fields:
|
|
2009
|
+
m[k] = val
|
|
2010
|
+
|
|
2011
|
+
return m
|
|
2012
|
+
|
|
2013
|
+
|
|
2014
|
+
class CreatePrompt23TypedDict(TypedDict):
|
|
2015
|
+
type: CreatePrompt2PromptsResponse200ApplicationJSONType
|
|
2016
|
+
r"""The type of the content part. Always `file`."""
|
|
2017
|
+
file: CreatePrompt2FileTypedDict
|
|
2018
|
+
|
|
2019
|
+
|
|
2020
|
+
class CreatePrompt23(BaseModel):
|
|
2021
|
+
type: CreatePrompt2PromptsResponse200ApplicationJSONType
|
|
2022
|
+
r"""The type of the content part. Always `file`."""
|
|
2023
|
+
|
|
2024
|
+
file: CreatePrompt2File
|
|
2025
|
+
|
|
2026
|
+
|
|
2027
|
+
CreatePrompt2PromptsResponse200Type = Literal["image_url",]
|
|
2028
|
+
|
|
2029
|
+
|
|
2030
|
+
class CreatePrompt2ImageURLTypedDict(TypedDict):
|
|
2031
|
+
url: str
|
|
2032
|
+
r"""Either a URL of the image or the base64 encoded data URI."""
|
|
2033
|
+
id: NotRequired[str]
|
|
2034
|
+
r"""The orq.ai id of the image"""
|
|
2035
|
+
detail: NotRequired[str]
|
|
2036
|
+
r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""
|
|
2037
|
+
|
|
2038
|
+
|
|
2039
|
+
class CreatePrompt2ImageURL(BaseModel):
|
|
2040
|
+
url: str
|
|
2041
|
+
r"""Either a URL of the image or the base64 encoded data URI."""
|
|
2042
|
+
|
|
2043
|
+
id: Optional[str] = None
|
|
2044
|
+
r"""The orq.ai id of the image"""
|
|
2045
|
+
|
|
2046
|
+
detail: Optional[str] = None
|
|
2047
|
+
r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""
|
|
2048
|
+
|
|
2049
|
+
@model_serializer(mode="wrap")
|
|
2050
|
+
def serialize_model(self, handler):
|
|
2051
|
+
optional_fields = set(["id", "detail"])
|
|
2052
|
+
serialized = handler(self)
|
|
2053
|
+
m = {}
|
|
2054
|
+
|
|
2055
|
+
for n, f in type(self).model_fields.items():
|
|
2056
|
+
k = f.alias or n
|
|
2057
|
+
val = serialized.get(k)
|
|
2058
|
+
|
|
2059
|
+
if val != UNSET_SENTINEL:
|
|
2060
|
+
if val is not None or k not in optional_fields:
|
|
2061
|
+
m[k] = val
|
|
2062
|
+
|
|
2063
|
+
return m
|
|
2064
|
+
|
|
2065
|
+
|
|
2066
|
+
class CreatePrompt22TypedDict(TypedDict):
|
|
2067
|
+
r"""The image part of the prompt message. Only supported with vision models."""
|
|
2068
|
+
|
|
2069
|
+
type: CreatePrompt2PromptsResponse200Type
|
|
2070
|
+
image_url: CreatePrompt2ImageURLTypedDict
|
|
2071
|
+
|
|
2072
|
+
|
|
2073
|
+
class CreatePrompt22(BaseModel):
|
|
2074
|
+
r"""The image part of the prompt message. Only supported with vision models."""
|
|
2075
|
+
|
|
2076
|
+
type: CreatePrompt2PromptsResponse200Type
|
|
2077
|
+
|
|
2078
|
+
image_url: CreatePrompt2ImageURL
|
|
2079
|
+
|
|
2080
|
+
|
|
+CreatePrompt2PromptsResponseType = Literal["text",]
+
+
+class CreatePrompt21TypedDict(TypedDict):
+    r"""Text content part of a prompt message"""
+
+    type: CreatePrompt2PromptsResponseType
+    text: str
+
+
+class CreatePrompt21(BaseModel):
+    r"""Text content part of a prompt message"""
+
+    type: CreatePrompt2PromptsResponseType
+
+    text: str
+
+
+CreatePromptContentPromptsResponse2TypedDict = TypeAliasType(
+    "CreatePromptContentPromptsResponse2TypedDict",
+    Union[CreatePrompt21TypedDict, CreatePrompt22TypedDict, CreatePrompt23TypedDict],
+)
+
+
+CreatePromptContentPromptsResponse2 = Annotated[
+    Union[
+        Annotated[CreatePrompt21, Tag("text")],
+        Annotated[CreatePrompt22, Tag("image_url")],
+        Annotated[CreatePrompt23, Tag("file")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+
+
+CreatePromptContentTypedDict = TypeAliasType(
+    "CreatePromptContentTypedDict",
+    Union[str, List[CreatePromptContentPromptsResponse2TypedDict]],
+)
+r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
+
+
+CreatePromptContent = TypeAliasType(
+    "CreatePromptContent", Union[str, List[CreatePromptContentPromptsResponse2]]
+)
+r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
+
+
+CreatePromptPromptsResponse200Type = Literal["function",]
+
+
+class CreatePromptFunctionTypedDict(TypedDict):
+    name: str
+    arguments: str
+    r"""JSON string arguments for the functions"""
+
+
+class CreatePromptFunction(BaseModel):
+    name: str
+
+    arguments: str
+    r"""JSON string arguments for the functions"""
+
+
+class CreatePromptToolCallsTypedDict(TypedDict):
+    type: CreatePromptPromptsResponse200Type
+    function: CreatePromptFunctionTypedDict
+    id: NotRequired[str]
+    index: NotRequired[float]
+
+
+class CreatePromptToolCalls(BaseModel):
+    type: CreatePromptPromptsResponse200Type
+
+    function: CreatePromptFunction
+
+    id: Optional[str] = None
+
+    index: Optional[float] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "index"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
|
+
class CreatePromptPromptsMessagesTypedDict(TypedDict):
|
|
2178
|
+
role: CreatePromptRole
|
|
2179
|
+
r"""The role of the prompt message"""
|
|
2180
|
+
content: Nullable[CreatePromptContentTypedDict]
|
|
2181
|
+
r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
|
|
2182
|
+
tool_calls: NotRequired[List[CreatePromptToolCallsTypedDict]]
|
|
2183
|
+
tool_call_id: NotRequired[Nullable[str]]
|
|
2184
|
+
|
|
2185
|
+
|
|
2186
|
+
class CreatePromptPromptsMessages(BaseModel):
|
|
2187
|
+
role: CreatePromptRole
|
|
2188
|
+
r"""The role of the prompt message"""
|
|
2189
|
+
|
|
2190
|
+
content: Nullable[CreatePromptContent]
|
|
2191
|
+
r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
|
|
2192
|
+
|
|
2193
|
+
tool_calls: Optional[List[CreatePromptToolCalls]] = None
|
|
2194
|
+
|
|
2195
|
+
tool_call_id: OptionalNullable[str] = UNSET
|
|
2196
|
+
|
|
2197
|
+
@model_serializer(mode="wrap")
|
|
2198
|
+
def serialize_model(self, handler):
|
|
2199
|
+
optional_fields = set(["tool_calls", "tool_call_id"])
|
|
2200
|
+
nullable_fields = set(["content", "tool_call_id"])
|
|
2201
|
+
serialized = handler(self)
|
|
2202
|
+
m = {}
|
|
2203
|
+
|
|
2204
|
+
for n, f in type(self).model_fields.items():
|
|
2205
|
+
k = f.alias or n
|
|
2206
|
+
val = serialized.get(k)
|
|
2207
|
+
is_nullable_and_explicitly_set = (
|
|
2208
|
+
k in nullable_fields
|
|
2209
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
2210
|
+
)
|
|
2211
|
+
|
|
2212
|
+
if val != UNSET_SENTINEL:
|
|
2213
|
+
if (
|
|
2214
|
+
val is not None
|
|
2215
|
+
or k not in optional_fields
|
|
2216
|
+
or is_nullable_and_explicitly_set
|
|
2217
|
+
):
|
|
2218
|
+
m[k] = val
|
|
2219
|
+
|
|
2220
|
+
return m
|
|
2221
|
+
|
|
2222
|
+
|
|
2223
|
+
@deprecated(
|
|
2224
|
+
"warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
|
|
2225
|
+
)
|
|
2226
|
+
class PromptConfigTypedDict(TypedDict):
|
|
2227
|
+
r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
|
|
2228
|
+
|
|
2229
|
+
messages: List[CreatePromptPromptsMessagesTypedDict]
|
|
2230
|
+
stream: NotRequired[bool]
|
|
2231
|
+
model: NotRequired[Nullable[str]]
|
|
2232
|
+
model_db_id: NotRequired[Nullable[str]]
|
|
2233
|
+
r"""The id of the resource"""
|
|
2234
|
+
model_type: NotRequired[Nullable[ModelType]]
|
|
2235
|
+
r"""The modality of the model"""
|
|
2236
|
+
model_parameters: NotRequired[ModelParametersTypedDict]
|
|
2237
|
+
r"""Model Parameters: Not all parameters apply to every model"""
|
|
2238
|
+
provider: NotRequired[Nullable[CreatePromptProvider]]
|
|
2239
|
+
integration_id: NotRequired[Nullable[str]]
|
|
2240
|
+
r"""The ID of the integration to use"""
|
|
2241
|
+
version: NotRequired[str]
|
|
2242
|
+
|
|
2243
|
+
|
|
2244
|
+
@deprecated(
|
|
2245
|
+
"warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
|
|
2246
|
+
)
|
|
2247
|
+
class PromptConfig(BaseModel):
|
|
2248
|
+
r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
|
|
2249
|
+
|
|
2250
|
+
messages: List[CreatePromptPromptsMessages]
|
|
2251
|
+
|
|
2252
|
+
stream: Optional[bool] = None
|
|
2253
|
+
|
|
2254
|
+
model: OptionalNullable[str] = UNSET
|
|
2255
|
+
|
|
2256
|
+
model_db_id: OptionalNullable[str] = UNSET
|
|
2257
|
+
r"""The id of the resource"""
|
|
2258
|
+
|
|
2259
|
+
model_type: OptionalNullable[ModelType] = UNSET
|
|
2260
|
+
r"""The modality of the model"""
|
|
2261
|
+
|
|
2262
|
+
model_parameters: Optional[ModelParameters] = None
|
|
2263
|
+
r"""Model Parameters: Not all parameters apply to every model"""
|
|
2264
|
+
|
|
2265
|
+
provider: OptionalNullable[CreatePromptProvider] = UNSET
|
|
2266
|
+
|
|
2267
|
+
integration_id: OptionalNullable[str] = UNSET
|
|
2268
|
+
r"""The ID of the integration to use"""
|
|
2269
|
+
|
|
2270
|
+
version: Optional[str] = None
|
|
2271
|
+
|
|
2272
|
+
@model_serializer(mode="wrap")
|
|
2273
|
+
def serialize_model(self, handler):
|
|
2274
|
+
optional_fields = set(
|
|
2275
|
+
[
|
|
2276
|
+
"stream",
|
|
2277
|
+
"model",
|
|
2278
|
+
"model_db_id",
|
|
2279
|
+
"model_type",
|
|
2280
|
+
"model_parameters",
|
|
2281
|
+
"provider",
|
|
2282
|
+
"integration_id",
|
|
2283
|
+
"version",
|
|
2284
|
+
]
|
|
2285
|
+
)
|
|
2286
|
+
nullable_fields = set(
|
|
2287
|
+
["model", "model_db_id", "model_type", "provider", "integration_id"]
|
|
2288
|
+
)
|
|
2289
|
+
serialized = handler(self)
|
|
2290
|
+
m = {}
|
|
2291
|
+
|
|
2292
|
+
for n, f in type(self).model_fields.items():
|
|
2293
|
+
k = f.alias or n
|
|
2294
|
+
val = serialized.get(k)
|
|
2295
|
+
is_nullable_and_explicitly_set = (
|
|
2296
|
+
k in nullable_fields
|
|
2297
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
2298
|
+
)
|
|
2299
|
+
|
|
2300
|
+
if val != UNSET_SENTINEL:
|
|
2301
|
+
if (
|
|
2302
|
+
val is not None
|
|
2303
|
+
or k not in optional_fields
|
|
2304
|
+
or is_nullable_and_explicitly_set
|
|
2305
|
+
):
|
|
2306
|
+
m[k] = val
|
|
2307
|
+
|
|
2308
|
+
return m
|
|
2309
|
+
|
|
2310
|
+
|
|
2311
|
+
CreatePromptPromptsVoice = Literal[
|
|
2312
|
+
"alloy",
|
|
2313
|
+
"echo",
|
|
2314
|
+
"fable",
|
|
2315
|
+
"onyx",
|
|
2316
|
+
"nova",
|
|
2317
|
+
"shimmer",
|
|
2318
|
+
]
|
|
2319
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
2320
|
+
|
|
2321
|
+
|
|
2322
|
+
CreatePromptPromptsResponse200Format = Literal[
|
|
2323
|
+
"wav",
|
|
2324
|
+
"mp3",
|
|
2325
|
+
"flac",
|
|
2326
|
+
"opus",
|
|
2327
|
+
"pcm16",
|
|
2328
|
+
]
|
|
2329
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
2330
|
+
|
|
2331
|
+
|
|
2332
|
+
class CreatePromptPromptsAudioTypedDict(TypedDict):
|
|
2333
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
2334
|
+
|
|
2335
|
+
voice: CreatePromptPromptsVoice
|
|
2336
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
2337
|
+
format_: CreatePromptPromptsResponse200Format
|
|
2338
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
2339
|
+
|
|
2340
|
+
|
|
2341
|
+
class CreatePromptPromptsAudio(BaseModel):
|
|
2342
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
2343
|
+
|
|
2344
|
+
voice: CreatePromptPromptsVoice
|
|
2345
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
2346
|
+
|
|
2347
|
+
format_: Annotated[
|
|
2348
|
+
CreatePromptPromptsResponse200Format, pydantic.Field(alias="format")
|
|
2349
|
+
]
|
|
2350
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
2351
|
+
|
|
2352
|
+
|
|
2353
|
+
CreatePromptResponseFormatPromptsResponse200ApplicationJSONType = Literal[
|
|
2354
|
+
"json_schema",
|
|
2355
|
+
]
|
|
2356
|
+
|
|
2357
|
+
|
|
2358
|
+
class CreatePromptResponseFormatPromptsResponseJSONSchemaTypedDict(TypedDict):
|
|
2359
|
+
name: str
|
|
2360
|
+
r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
|
|
2361
|
+
description: NotRequired[str]
|
|
2362
|
+
r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
|
|
2363
|
+
schema_: NotRequired[Any]
|
|
2364
|
+
r"""The schema for the response format, described as a JSON Schema object."""
|
|
2365
|
+
strict: NotRequired[bool]
|
|
2366
|
+
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
2367
|
+
|
|
2368
|
+
|
|
2369
|
+
class CreatePromptResponseFormatPromptsResponseJSONSchema(BaseModel):
|
|
2370
|
+
name: str
|
|
2371
|
+
r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
|
|
2372
|
+
|
|
2373
|
+
description: Optional[str] = None
|
|
2374
|
+
r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
|
|
2375
|
+
|
|
2376
|
+
schema_: Annotated[Optional[Any], pydantic.Field(alias="schema")] = None
|
|
2377
|
+
r"""The schema for the response format, described as a JSON Schema object."""
|
|
2378
|
+
|
|
2379
|
+
strict: Optional[bool] = False
|
|
2380
|
+
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
2381
|
+
|
|
2382
|
+
@model_serializer(mode="wrap")
|
|
2383
|
+
def serialize_model(self, handler):
|
|
2384
|
+
optional_fields = set(["description", "schema", "strict"])
|
|
2385
|
+
serialized = handler(self)
|
|
2386
|
+
m = {}
|
|
2387
|
+
|
|
2388
|
+
for n, f in type(self).model_fields.items():
|
|
2389
|
+
k = f.alias or n
|
|
2390
|
+
val = serialized.get(k)
|
|
2391
|
+
|
|
2392
|
+
if val != UNSET_SENTINEL:
|
|
2393
|
+
if val is not None or k not in optional_fields:
|
|
2394
|
+
m[k] = val
|
|
2395
|
+
|
|
2396
|
+
return m
|
|
2397
|
+
|
|
2398
|
+
|
|
2399
|
+
class CreatePromptResponseFormatPromptsResponse200JSONSchemaTypedDict(TypedDict):
|
|
2400
|
+
r"""
|
|
2401
|
+
|
|
2402
|
+
JSON Schema response format. Used to generate structured JSON responses
|
|
2403
|
+
"""
|
|
2404
|
+
|
|
2405
|
+
type: CreatePromptResponseFormatPromptsResponse200ApplicationJSONType
|
|
2406
|
+
json_schema: CreatePromptResponseFormatPromptsResponseJSONSchemaTypedDict
|
|
2407
|
+
|
|
2408
|
+
|
|
2409
|
+
class CreatePromptResponseFormatPromptsResponse200JSONSchema(BaseModel):
|
|
2410
|
+
r"""
|
|
2411
|
+
|
|
2412
|
+
JSON Schema response format. Used to generate structured JSON responses
|
|
2413
|
+
"""
|
|
2414
|
+
|
|
2415
|
+
type: CreatePromptResponseFormatPromptsResponse200ApplicationJSONType
|
|
2416
|
+
|
|
2417
|
+
json_schema: CreatePromptResponseFormatPromptsResponseJSONSchema
|
|
2418
|
+
|
|
2419
|
+
|
|
2420
|
+
CreatePromptResponseFormatPromptsResponse200Type = Literal["json_object",]
|
|
2421
|
+
|
|
2422
|
+
|
|
2423
|
+
class CreatePromptResponseFormatPromptsJSONObjectTypedDict(TypedDict):
|
|
2424
|
+
r"""
|
|
2425
|
+
|
|
2426
|
+
JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
|
|
2427
|
+
"""
|
|
2428
|
+
|
|
2429
|
+
type: CreatePromptResponseFormatPromptsResponse200Type
|
|
2430
|
+
|
|
2431
|
+
|
|
2432
|
+
class CreatePromptResponseFormatPromptsJSONObject(BaseModel):
|
|
2433
|
+
r"""
|
|
2434
|
+
|
|
2435
|
+
JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
|
|
2436
|
+
"""
|
|
2437
|
+
|
|
2438
|
+
type: CreatePromptResponseFormatPromptsResponse200Type
|
|
2439
|
+
|
|
2440
|
+
|
|
2441
|
+
CreatePromptResponseFormatPromptsResponseType = Literal["text",]
|
|
2442
|
+
|
|
2443
|
+
|
|
2444
|
+
class CreatePromptResponseFormatPromptsTextTypedDict(TypedDict):
|
|
2445
|
+
r"""
|
|
2446
|
+
|
|
2447
|
+
Default response format. Used to generate text responses
|
|
2448
|
+
"""
|
|
2449
|
+
|
|
2450
|
+
type: CreatePromptResponseFormatPromptsResponseType
|
|
2451
|
+
|
|
2452
|
+
|
|
2453
|
+
class CreatePromptResponseFormatPromptsText(BaseModel):
|
|
2454
|
+
r"""
|
|
2455
|
+
|
|
2456
|
+
Default response format. Used to generate text responses
|
|
2457
|
+
"""
|
|
2458
|
+
|
|
2459
|
+
type: CreatePromptResponseFormatPromptsResponseType
|
|
2460
|
+
|
|
2461
|
+
|
|
2462
|
+
CreatePromptPromptsResponseFormatTypedDict = TypeAliasType(
|
|
2463
|
+
"CreatePromptPromptsResponseFormatTypedDict",
|
|
2464
|
+
Union[
|
|
2465
|
+
CreatePromptResponseFormatPromptsTextTypedDict,
|
|
2466
|
+
CreatePromptResponseFormatPromptsJSONObjectTypedDict,
|
|
2467
|
+
CreatePromptResponseFormatPromptsResponse200JSONSchemaTypedDict,
|
|
2468
|
+
],
|
|
2469
|
+
)
|
|
2470
|
+
r"""An object specifying the format that the model must output"""
|
|
2471
|
+
|
|
2472
|
+
|
|
2473
|
+
CreatePromptPromptsResponseFormat = Annotated[
|
|
2474
|
+
Union[
|
|
2475
|
+
Annotated[CreatePromptResponseFormatPromptsText, Tag("text")],
|
|
2476
|
+
Annotated[CreatePromptResponseFormatPromptsJSONObject, Tag("json_object")],
|
|
2477
|
+
Annotated[
|
|
2478
|
+
CreatePromptResponseFormatPromptsResponse200JSONSchema, Tag("json_schema")
|
|
2479
|
+
],
|
|
2480
|
+
],
|
|
2481
|
+
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
2482
|
+
]
|
|
2483
|
+
r"""An object specifying the format that the model must output"""
|
|
2484
|
+
|
|
2485
|
+
|
|
2486
|
+
CreatePromptPromptsReasoningEffort = Literal[
|
|
2487
|
+
"none",
|
|
2488
|
+
"minimal",
|
|
2489
|
+
"low",
|
|
2490
|
+
"medium",
|
|
2491
|
+
"high",
|
|
2492
|
+
"xhigh",
|
|
2493
|
+
]
|
|
2494
|
+
r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
|
|
2495
|
+
|
|
2496
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
|
|
2497
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
|
|
2498
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
2499
|
+
- `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
2500
|
+
|
|
2501
|
+
Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
|
|
2502
|
+
"""
|
|
2503
|
+
|
|
2504
|
+
|
|
2505
|
+
CreatePromptPromptsStopTypedDict = TypeAliasType(
|
|
2506
|
+
"CreatePromptPromptsStopTypedDict", Union[str, List[str]]
|
|
2507
|
+
)
|
|
2508
|
+
r"""Up to 4 sequences where the API will stop generating further tokens."""
|
|
2509
|
+
|
|
2510
|
+
|
|
2511
|
+
CreatePromptPromptsStop = TypeAliasType(
|
|
2512
|
+
"CreatePromptPromptsStop", Union[str, List[str]]
|
|
2513
|
+
)
|
|
2514
|
+
r"""Up to 4 sequences where the API will stop generating further tokens."""
|
|
2515
|
+
|
|
2516
|
+
|
|
2517
|
+
class CreatePromptPromptsStreamOptionsTypedDict(TypedDict):
|
|
2518
|
+
r"""Options for streaming response. Only set this when you set stream: true."""
|
|
2519
|
+
|
|
2520
|
+
include_usage: NotRequired[bool]
|
|
2521
|
+
r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
|
|
2522
|
+
|
|
2523
|
+
|
|
2524
|
+
class CreatePromptPromptsStreamOptions(BaseModel):
|
|
2525
|
+
r"""Options for streaming response. Only set this when you set stream: true."""
|
|
2526
|
+
|
|
2527
|
+
include_usage: Optional[bool] = None
|
|
2528
|
+
r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
|
|
2529
|
+
|
|
2530
|
+
@model_serializer(mode="wrap")
|
|
2531
|
+
def serialize_model(self, handler):
|
|
2532
|
+
optional_fields = set(["include_usage"])
|
|
2533
|
+
serialized = handler(self)
|
|
2534
|
+
m = {}
|
|
2535
|
+
|
|
2536
|
+
for n, f in type(self).model_fields.items():
|
|
2537
|
+
k = f.alias or n
|
|
2538
|
+
val = serialized.get(k)
|
|
2539
|
+
|
|
2540
|
+
if val != UNSET_SENTINEL:
|
|
2541
|
+
if val is not None or k not in optional_fields:
|
|
2542
|
+
m[k] = val
|
|
2543
|
+
|
|
2544
|
+
return m
|
|
2545
|
+
|
|
2546
|
+
|
|
2547
|
+
CreatePromptPromptsThinkingTypedDict = TypeAliasType(
|
|
2548
|
+
"CreatePromptPromptsThinkingTypedDict",
|
|
2549
|
+
Union[ThinkingConfigDisabledSchemaTypedDict, ThinkingConfigEnabledSchemaTypedDict],
|
|
2550
|
+
)
|
|
2551
|
+
|
|
2552
|
+
|
|
2553
|
+
CreatePromptPromptsThinking = Annotated[
|
|
2554
|
+
Union[
|
|
2555
|
+
Annotated[ThinkingConfigDisabledSchema, Tag("disabled")],
|
|
2556
|
+
Annotated[ThinkingConfigEnabledSchema, Tag("enabled")],
|
|
2557
|
+
],
|
|
2558
|
+
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
2559
|
+
]
|
|
2560
|
+
|
|
2561
|
+
|
|
2562
|
+
CreatePromptToolChoicePromptsType = Literal["function",]
|
|
2563
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
2564
|
+
|
|
2565
|
+
|
|
2566
|
+
class CreatePromptToolChoicePromptsFunctionTypedDict(TypedDict):
|
|
2567
|
+
name: str
|
|
2568
|
+
r"""The name of the function to call."""
|
|
2569
|
+
|
|
2570
|
+
|
|
2571
|
+
class CreatePromptToolChoicePromptsFunction(BaseModel):
|
|
2572
|
+
name: str
|
|
2573
|
+
r"""The name of the function to call."""
|
|
2574
|
+
|
|
2575
|
+
|
|
2576
|
+
class CreatePromptToolChoicePrompts2TypedDict(TypedDict):
|
|
2577
|
+
function: CreatePromptToolChoicePromptsFunctionTypedDict
|
|
2578
|
+
type: NotRequired[CreatePromptToolChoicePromptsType]
|
|
2579
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
2580
|
+
|
|
2581
|
+
|
|
2582
|
+
class CreatePromptToolChoicePrompts2(BaseModel):
|
|
2583
|
+
function: CreatePromptToolChoicePromptsFunction
|
|
2584
|
+
|
|
2585
|
+
type: Optional[CreatePromptToolChoicePromptsType] = None
|
|
2586
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
2587
|
+
|
|
2588
|
+
@model_serializer(mode="wrap")
|
|
2589
|
+
def serialize_model(self, handler):
|
|
2590
|
+
optional_fields = set(["type"])
|
|
2591
|
+
serialized = handler(self)
|
|
2592
|
+
m = {}
|
|
2593
|
+
|
|
2594
|
+
for n, f in type(self).model_fields.items():
|
|
2595
|
+
k = f.alias or n
|
|
2596
|
+
val = serialized.get(k)
|
|
2597
|
+
|
|
2598
|
+
if val != UNSET_SENTINEL:
|
|
2599
|
+
if val is not None or k not in optional_fields:
|
|
2600
|
+
m[k] = val
|
|
2601
|
+
|
|
2602
|
+
return m
|
|
2603
|
+
|
|
2604
|
+
|
|
2605
|
+
CreatePromptToolChoicePrompts1 = Literal[
|
|
2606
|
+
"none",
|
|
2607
|
+
"auto",
|
|
2608
|
+
"required",
|
|
2609
|
+
]
|
|
2610
|
+
|
|
2611
|
+
|
|
2612
|
+
CreatePromptPromptsToolChoiceTypedDict = TypeAliasType(
|
|
2613
|
+
"CreatePromptPromptsToolChoiceTypedDict",
|
|
2614
|
+
Union[CreatePromptToolChoicePrompts2TypedDict, CreatePromptToolChoicePrompts1],
|
|
2615
|
+
)
|
|
2616
|
+
r"""Controls which (if any) tool is called by the model."""
|
|
2617
|
+
|
|
2618
|
+
|
|
2619
|
+
CreatePromptPromptsToolChoice = TypeAliasType(
|
|
2620
|
+
"CreatePromptPromptsToolChoice",
|
|
2621
|
+
Union[CreatePromptToolChoicePrompts2, CreatePromptToolChoicePrompts1],
|
|
2622
|
+
)
|
|
2623
|
+
r"""Controls which (if any) tool is called by the model."""
|
|
2624
|
+
|
|
2625
|
+
|
|
2626
|
+
CreatePromptPromptsModalities = Literal[
|
|
2627
|
+
"text",
|
|
2628
|
+
"audio",
|
|
2629
|
+
]
|
|
2630
|
+
|
|
2631
|
+
|
|
2632
|
+
CreatePromptIDPrompts1 = Literal[
|
|
2633
|
+
"orq_pii_detection",
|
|
2634
|
+
"orq_sexual_moderation",
|
|
2635
|
+
"orq_harmful_moderation",
|
|
2636
|
+
]
|
|
2637
|
+
r"""The key of the guardrail."""
|
|
2638
|
+
|
|
2639
|
+
|
|
2640
|
+
CreatePromptPromptsIDTypedDict = TypeAliasType(
|
|
2641
|
+
"CreatePromptPromptsIDTypedDict", Union[CreatePromptIDPrompts1, str]
|
|
2642
|
+
)
|
|
2643
|
+
|
|
2644
|
+
|
|
2645
|
+
CreatePromptPromptsID = TypeAliasType(
|
|
2646
|
+
"CreatePromptPromptsID", Union[CreatePromptIDPrompts1, str]
|
|
2647
|
+
)
|
|
2648
|
+
|
|
2649
|
+
|
|
2650
|
+
CreatePromptPromptsExecuteOn = Literal[
|
|
2651
|
+
"input",
|
|
2652
|
+
"output",
|
|
2653
|
+
]
|
|
2654
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
2655
|
+
|
|
2656
|
+
|
|
2657
|
+
class CreatePromptPromptsGuardrailsTypedDict(TypedDict):
|
|
2658
|
+
id: CreatePromptPromptsIDTypedDict
|
|
2659
|
+
execute_on: CreatePromptPromptsExecuteOn
|
|
2660
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
2661
|
+
|
|
2662
|
+
|
|
2663
|
+
class CreatePromptPromptsGuardrails(BaseModel):
|
|
2664
|
+
id: CreatePromptPromptsID
|
|
2665
|
+
|
|
2666
|
+
execute_on: CreatePromptPromptsExecuteOn
|
|
2667
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
2668
|
+
|
|
2669
|
+
|
|
2670
|
+
class CreatePromptPromptsFallbacksTypedDict(TypedDict):
|
|
2671
|
+
model: str
|
|
2672
|
+
r"""Fallback model identifier"""
|
|
2673
|
+
|
|
2674
|
+
|
|
2675
|
+
class CreatePromptPromptsFallbacks(BaseModel):
|
|
2676
|
+
model: str
|
|
2677
|
+
r"""Fallback model identifier"""
|
|
2678
|
+
|
|
2679
|
+
|
|
2680
|
+
class CreatePromptPromptsRetryTypedDict(TypedDict):
|
|
2681
|
+
r"""Retry configuration for the request"""
|
|
2682
|
+
|
|
2683
|
+
count: NotRequired[float]
|
|
2684
|
+
r"""Number of retry attempts (1-5)"""
|
|
2685
|
+
on_codes: NotRequired[List[float]]
|
|
2686
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
2687
|
+
|
|
2688
|
+
|
|
2689
|
+
class CreatePromptPromptsRetry(BaseModel):
|
|
2690
|
+
r"""Retry configuration for the request"""
|
|
2691
|
+
|
|
2692
|
+
count: Optional[float] = 3
|
|
2693
|
+
r"""Number of retry attempts (1-5)"""
|
|
2694
|
+
|
|
2695
|
+
on_codes: Optional[List[float]] = None
|
|
2696
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
2697
|
+
|
|
2698
|
+
@model_serializer(mode="wrap")
|
|
2699
|
+
def serialize_model(self, handler):
|
|
2700
|
+
optional_fields = set(["count", "on_codes"])
|
|
2701
|
+
serialized = handler(self)
|
|
2702
|
+
m = {}
|
|
2703
|
+
|
|
2704
|
+
for n, f in type(self).model_fields.items():
|
|
2705
|
+
k = f.alias or n
|
|
2706
|
+
val = serialized.get(k)
|
|
2707
|
+
|
|
2708
|
+
if val != UNSET_SENTINEL:
|
|
2709
|
+
if val is not None or k not in optional_fields:
|
|
2710
|
+
m[k] = val
|
|
2711
|
+
|
|
2712
|
+
return m
|
|
2713
|
+
|
|
2714
|
+
|
|
2715
|
+
CreatePromptPromptsResponseType = Literal["exact_match",]
|
|
2716
|
+
|
|
2717
|
+
|
|
2718
|
+
class CreatePromptPromptsCacheTypedDict(TypedDict):
|
|
2719
|
+
r"""Cache configuration for the request."""
|
|
2720
|
+
|
|
2721
|
+
type: CreatePromptPromptsResponseType
|
|
2722
|
+
ttl: NotRequired[float]
|
|
2723
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
2724
|
+
|
|
2725
|
+
|
|
2726
|
+
class CreatePromptPromptsCache(BaseModel):
|
|
2727
|
+
r"""Cache configuration for the request."""
|
|
2728
|
+
|
|
2729
|
+
type: CreatePromptPromptsResponseType
|
|
2730
|
+
|
|
2731
|
+
ttl: Optional[float] = 1800
|
|
2732
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
2733
|
+
|
|
2734
|
+
@model_serializer(mode="wrap")
|
|
2735
|
+
def serialize_model(self, handler):
|
|
2736
|
+
optional_fields = set(["ttl"])
|
|
2737
|
+
serialized = handler(self)
|
|
2738
|
+
m = {}
|
|
2739
|
+
|
|
2740
|
+
for n, f in type(self).model_fields.items():
|
|
2741
|
+
k = f.alias or n
|
|
2742
|
+
val = serialized.get(k)
|
|
2743
|
+
|
|
2744
|
+
if val != UNSET_SENTINEL:
|
|
2745
|
+
if val is not None or k not in optional_fields:
|
|
2746
|
+
m[k] = val
|
|
2747
|
+
|
|
2748
|
+
return m
|
|
2749
|
+
|
|
2750
|
+
|
|
2751
|
+
CreatePromptLoadBalancerPromptsType = Literal["weight_based",]
|
|
2752
|
+
|
|
2753
|
+
|
|
2754
|
+
class CreatePromptLoadBalancerPromptsModelsTypedDict(TypedDict):
|
|
2755
|
+
model: str
|
|
2756
|
+
r"""Model identifier for load balancing"""
|
|
2757
|
+
weight: NotRequired[float]
|
|
2758
|
+
r"""Weight assigned to this model for load balancing"""
|
|
2759
|
+
|
|
2760
|
+
|
|
2761
|
+
class CreatePromptLoadBalancerPromptsModels(BaseModel):
|
|
2762
|
+
model: str
|
|
2763
|
+
r"""Model identifier for load balancing"""
|
|
2764
|
+
|
|
2765
|
+
weight: Optional[float] = 0.5
|
|
2766
|
+
r"""Weight assigned to this model for load balancing"""
|
|
2767
|
+
|
|
2768
|
+
@model_serializer(mode="wrap")
|
|
2769
|
+
def serialize_model(self, handler):
|
|
2770
|
+
optional_fields = set(["weight"])
|
|
2771
|
+
serialized = handler(self)
|
|
2772
|
+
m = {}
|
|
2773
|
+
|
|
2774
|
+
for n, f in type(self).model_fields.items():
|
|
2775
|
+
k = f.alias or n
|
|
2776
|
+
val = serialized.get(k)
|
|
2777
|
+
|
|
2778
|
+
if val != UNSET_SENTINEL:
|
|
2779
|
+
if val is not None or k not in optional_fields:
|
|
2780
|
+
m[k] = val
|
|
2781
|
+
|
|
2782
|
+
return m
|
|
2783
|
+
|
|
2784
|
+
|
|
2785
|
+
class CreatePromptLoadBalancerPrompts1TypedDict(TypedDict):
|
|
2786
|
+
type: CreatePromptLoadBalancerPromptsType
|
|
2787
|
+
models: List[CreatePromptLoadBalancerPromptsModelsTypedDict]
|
|
2788
|
+
|
|
2789
|
+
|
|
2790
|
+
class CreatePromptLoadBalancerPrompts1(BaseModel):
|
|
2791
|
+
type: CreatePromptLoadBalancerPromptsType
|
|
2792
|
+
|
|
2793
|
+
models: List[CreatePromptLoadBalancerPromptsModels]
|
|
2794
|
+
|
|
2795
|
+
|
|
2796
|
+
CreatePromptPromptsLoadBalancerTypedDict = CreatePromptLoadBalancerPrompts1TypedDict
|
|
2797
|
+
r"""Load balancer configuration for the request."""
|
|
2798
|
+
|
|
2799
|
+
|
|
2800
|
+
CreatePromptPromptsLoadBalancer = CreatePromptLoadBalancerPrompts1
|
|
2801
|
+
r"""Load balancer configuration for the request."""
|
|
2802
|
+
|
|
2803
|
+
|
|
2804
|
+
class CreatePromptPromptsTimeoutTypedDict(TypedDict):
|
|
2805
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
2806
|
+
|
|
2807
|
+
call_timeout: float
|
|
2808
|
+
r"""Timeout value in milliseconds"""
|
|
2809
|
+
|
|
2810
|
+
|
|
2811
|
+
class CreatePromptPromptsTimeout(BaseModel):
|
|
2812
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
2813
|
+
|
|
2814
|
+
call_timeout: float
|
|
2815
|
+
r"""Timeout value in milliseconds"""
|
|
2816
|
+
|
|
2817
|
+
|
|
2818
|
+
CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyRole = Literal["tool",]
|
|
2819
|
+
r"""The role of the messages author, in this case tool."""
|
|
2820
|
+
|
|
2821
|
+
|
|
2822
|
+
CreatePromptContentPromptsResponse200ApplicationJSONResponseBody2TypedDict = (
|
|
2823
|
+
TextContentPartSchemaTypedDict
|
|
2824
|
+
)
|
|
2825
|
+
|
|
2826
|
+
|
|
2827
|
+
CreatePromptContentPromptsResponse200ApplicationJSONResponseBody2 = (
|
|
2828
|
+
TextContentPartSchema
|
|
2829
|
+
)
|
|
2830
|
+
|
|
2831
|
+
|
|
2832
|
+
CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyContentTypedDict = TypeAliasType(
|
|
2833
|
+
"CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyContentTypedDict",
|
|
2834
|
+
Union[
|
|
2835
|
+
str,
|
|
2836
|
+
List[
|
|
2837
|
+
CreatePromptContentPromptsResponse200ApplicationJSONResponseBody2TypedDict
|
|
2838
|
+
],
|
|
2839
|
+
],
|
|
2840
|
+
)
|
|
2841
|
+
r"""The contents of the tool message."""
|
|
2842
|
+
|
|
2843
|
+
|
|
2844
|
+
CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyContent = (
|
|
2845
|
+
TypeAliasType(
|
|
2846
|
+
"CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyContent",
|
|
2847
|
+
Union[
|
|
2848
|
+
str, List[CreatePromptContentPromptsResponse200ApplicationJSONResponseBody2]
|
|
2849
|
+
],
|
|
2850
|
+
)
|
|
2851
|
+
)
|
|
2852
|
+
r"""The contents of the tool message."""
|
|
2853
|
+
|
|
2854
|
+
|
|
2855
|
+
CreatePromptMessagesPromptsResponse200Type = Literal["ephemeral",]
|
|
2856
|
+
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
2857
|
+
|
|
2858
|
+
|
|
2859
|
+
CreatePromptMessagesPromptsTTL = Literal[
|
|
2860
|
+
"5m",
|
|
2861
|
+
"1h",
|
|
2862
|
+
]
|
|
2863
|
+
r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
|
|
2864
|
+
|
|
2865
|
+
- `5m`: 5 minutes
|
|
2866
|
+
- `1h`: 1 hour
|
|
2867
|
+
|
|
2868
|
+
Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
2869
|
+
"""
|
|
2870
|
+
|
|
2871
|
+
|
|
2872
|
+
class CreatePromptMessagesPromptsCacheControlTypedDict(TypedDict):
|
|
2873
|
+
type: CreatePromptMessagesPromptsResponse200Type
|
|
2874
|
+
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
2875
|
+
ttl: NotRequired[CreatePromptMessagesPromptsTTL]
|
|
2876
|
+
r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
|
|
2877
|
+
|
|
2878
|
+
- `5m`: 5 minutes
|
|
2879
|
+
- `1h`: 1 hour
|
|
2880
|
+
|
|
2881
|
+
Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
2882
|
+
"""
|
|
2883
|
+
|
|
2884
|
+
|
|
2885
|
+
class CreatePromptMessagesPromptsCacheControl(BaseModel):
|
|
2886
|
+
type: CreatePromptMessagesPromptsResponse200Type
|
|
2887
|
+
r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
|
|
2888
|
+
|
|
2889
|
+
ttl: Optional[CreatePromptMessagesPromptsTTL] = "5m"
|
|
2890
|
+
r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
|
|
2891
|
+
|
|
2892
|
+
- `5m`: 5 minutes
|
|
2893
|
+
- `1h`: 1 hour
|
|
2894
|
+
|
|
2895
|
+
Defaults to `5m`. Only supported by `Anthropic` Claude models.
|
|
2896
|
+
"""
|
|
2897
|
+
|
|
2898
|
+
@model_serializer(mode="wrap")
|
|
2899
|
+
def serialize_model(self, handler):
|
|
2900
|
+
optional_fields = set(["ttl"])
|
|
2901
|
+
serialized = handler(self)
|
|
2902
|
+
m = {}
|
|
2903
|
+
|
|
2904
|
+
for n, f in type(self).model_fields.items():
|
|
2905
|
+
k = f.alias or n
|
|
2906
|
+
val = serialized.get(k)
|
|
2907
|
+
|
|
2908
|
+
if val != UNSET_SENTINEL:
|
|
2909
|
+
if val is not None or k not in optional_fields:
|
|
2910
|
+
m[k] = val
|
|
2911
|
+
|
|
2912
|
+
return m
|
|
2913
|
+
|
|
2914
|
+
|
|
2915
|
+
class CreatePromptMessagesPromptsToolMessageTypedDict(TypedDict):
|
|
2916
|
+
role: CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyRole
|
|
2917
|
+
r"""The role of the messages author, in this case tool."""
|
|
2918
|
+
content: CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyContentTypedDict
|
|
2919
|
+
r"""The contents of the tool message."""
|
|
2920
|
+
tool_call_id: Nullable[str]
|
|
2921
|
+
r"""Tool call that this message is responding to."""
|
|
2922
|
+
cache_control: NotRequired[CreatePromptMessagesPromptsCacheControlTypedDict]
|
|
2923
|
+
|
|
2924
|
+
|
|
2925
|
+
class CreatePromptMessagesPromptsToolMessage(BaseModel):
|
|
2926
|
+
role: CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyRole
|
|
2927
|
+
r"""The role of the messages author, in this case tool."""
|
|
2928
|
+
|
|
2929
|
+
content: CreatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyContent
|
|
2930
|
+
r"""The contents of the tool message."""
|
|
2931
|
+
|
|
2932
|
+
tool_call_id: Nullable[str]
|
|
2933
|
+
r"""Tool call that this message is responding to."""
|
|
2934
|
+
|
|
2935
|
+
cache_control: Optional[CreatePromptMessagesPromptsCacheControl] = None
|
|
2936
|
+
|
|
2937
|
+
@model_serializer(mode="wrap")
|
|
2938
|
+
def serialize_model(self, handler):
|
|
2939
|
+
optional_fields = set(["cache_control"])
|
|
2940
|
+
nullable_fields = set(["tool_call_id"])
|
|
2941
|
+
serialized = handler(self)
|
|
2942
|
+
m = {}
|
|
2943
|
+
|
|
2944
|
+
for n, f in type(self).model_fields.items():
|
|
2945
|
+
k = f.alias or n
|
|
2946
|
+
val = serialized.get(k)
|
|
2947
|
+
is_nullable_and_explicitly_set = (
|
|
2948
|
+
k in nullable_fields
|
|
2949
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
2950
|
+
)
|
|
2951
|
+
|
|
2952
|
+
if val != UNSET_SENTINEL:
|
|
2953
|
+
if (
|
|
2954
|
+
val is not None
|
|
2955
|
+
or k not in optional_fields
|
|
2956
|
+
or is_nullable_and_explicitly_set
|
|
2957
|
+
):
|
|
2958
|
+
m[k] = val
|
|
2959
|
+
|
|
2960
|
+
return m
|
|
2961
|
+
|
|
2962
|
+
|
|
2963
|
+
CreatePromptContentPromptsResponse200ApplicationJSON2TypedDict = TypeAliasType(
|
|
2964
|
+
"CreatePromptContentPromptsResponse200ApplicationJSON2TypedDict",
|
|
2965
|
+
Union[
|
|
2966
|
+
RefusalPartSchemaTypedDict,
|
|
2967
|
+
RedactedReasoningPartSchemaTypedDict,
|
|
2968
|
+
TextContentPartSchemaTypedDict,
|
|
2969
|
+
ReasoningPartSchemaTypedDict,
|
|
2970
|
+
],
|
|
2971
|
+
)
|
|
2972
|
+
|
|
2973
|
+
|
|
2974
|
+
CreatePromptContentPromptsResponse200ApplicationJSON2 = Annotated[
|
|
2975
|
+
Union[
|
|
2976
|
+
Annotated[TextContentPartSchema, Tag("text")],
|
|
2977
|
+
Annotated[RefusalPartSchema, Tag("refusal")],
|
|
2978
|
+
Annotated[ReasoningPartSchema, Tag("reasoning")],
|
|
2979
|
+
Annotated[RedactedReasoningPartSchema, Tag("redacted_reasoning")],
|
|
2980
|
+
],
|
|
2981
|
+
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
2982
|
+
]
|
|
2983
|
+
|
|
2984
|
+
|
|
2985
|
+
CreatePromptMessagesPromptsResponse200ApplicationJSONContentTypedDict = TypeAliasType(
|
|
2986
|
+
"CreatePromptMessagesPromptsResponse200ApplicationJSONContentTypedDict",
|
|
2987
|
+
Union[str, List[CreatePromptContentPromptsResponse200ApplicationJSON2TypedDict]],
|
|
2988
|
+
)
|
|
2989
|
+
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
2990
|
+
|
|
2991
|
+
|
|
2992
|
+
CreatePromptMessagesPromptsResponse200ApplicationJSONContent = TypeAliasType(
|
|
2993
|
+
"CreatePromptMessagesPromptsResponse200ApplicationJSONContent",
|
|
2994
|
+
Union[str, List[CreatePromptContentPromptsResponse200ApplicationJSON2]],
|
|
2995
|
+
)
|
|
2996
|
+
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
2997
|
+
|
|
2998
|
+
|
|
2999
|
+
CreatePromptMessagesPromptsResponse200ApplicationJSONRole = Literal["assistant",]
|
|
3000
|
+
r"""The role of the messages author, in this case `assistant`."""
|
|
3001
|
+
|
|
3002
|
+
|
|
3003
|
+
class CreatePromptMessagesPromptsAudioTypedDict(TypedDict):
|
|
3004
|
+
r"""Data about a previous audio response from the model."""
|
|
3005
|
+
|
|
3006
|
+
id: str
|
|
3007
|
+
r"""Unique identifier for a previous audio response from the model."""
|
|
3008
|
+
|
|
3009
|
+
|
|
3010
|
+
class CreatePromptMessagesPromptsAudio(BaseModel):
|
|
3011
|
+
r"""Data about a previous audio response from the model."""
|
|
3012
|
+
|
|
3013
|
+
id: str
|
|
3014
|
+
r"""Unique identifier for a previous audio response from the model."""
|
|
3015
|
+
|
|
3016
|
+
|
|
3017
|
+
CreatePromptMessagesPromptsResponseType = Literal["function",]
|
|
3018
|
+
r"""The type of the tool. Currently, only `function` is supported."""
|
|
3019
|
+
|
|
3020
|
+
|
|
3021
|
+
class CreatePromptMessagesPromptsFunctionTypedDict(TypedDict):
|
|
3022
|
+
name: NotRequired[str]
|
|
3023
|
+
r"""The name of the function to call."""
|
|
3024
|
+
arguments: NotRequired[str]
|
|
3025
|
+
r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
|
|
3026
|
+
|
|
3027
|
+
|
|
3028
|
+
class CreatePromptMessagesPromptsFunction(BaseModel):
|
|
3029
|
+
name: Optional[str] = None
|
|
3030
|
+
r"""The name of the function to call."""
|
|
3031
|
+
|
|
3032
|
+
arguments: Optional[str] = None
|
|
3033
|
+
r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
|
|
3034
|
+
|
|
3035
|
+
@model_serializer(mode="wrap")
|
|
3036
|
+
def serialize_model(self, handler):
|
|
3037
|
+
optional_fields = set(["name", "arguments"])
|
|
3038
|
+
serialized = handler(self)
|
|
3039
|
+
m = {}
|
|
3040
|
+
|
|
3041
|
+
for n, f in type(self).model_fields.items():
|
|
3042
|
+
k = f.alias or n
|
|
3043
|
+
val = serialized.get(k)
|
|
3044
|
+
|
|
3045
|
+
if val != UNSET_SENTINEL:
|
|
3046
|
+
if val is not None or k not in optional_fields:
|
|
3047
|
+
m[k] = val
|
|
3048
|
+
|
|
3049
|
+
return m
|
|
3050
|
+
|
|
3051
|
+
|
|
3052
|
+
class CreatePromptMessagesPromptsToolCallsTypedDict(TypedDict):
|
|
3053
|
+
id: str
|
|
3054
|
+
r"""The ID of the tool call."""
|
|
3055
|
+
type: CreatePromptMessagesPromptsResponseType
|
|
3056
|
+
r"""The type of the tool. Currently, only `function` is supported."""
|
|
3057
|
+
function: CreatePromptMessagesPromptsFunctionTypedDict
|
|
3058
|
+
thought_signature: NotRequired[str]
|
|
3059
|
+
r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
|
|
3060
|
+
|
|
3061
|
+
|
|
3062
|
+
class CreatePromptMessagesPromptsToolCalls(BaseModel):
|
|
3063
|
+
id: str
|
|
3064
|
+
r"""The ID of the tool call."""
|
|
3065
|
+
|
|
3066
|
+
type: CreatePromptMessagesPromptsResponseType
|
|
3067
|
+
r"""The type of the tool. Currently, only `function` is supported."""
|
|
3068
|
+
|
|
3069
|
+
function: CreatePromptMessagesPromptsFunction
|
|
3070
|
+
|
|
3071
|
+
thought_signature: Optional[str] = None
|
|
3072
|
+
r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
|
|
3073
|
+
|
|
3074
|
+
@model_serializer(mode="wrap")
|
|
3075
|
+
def serialize_model(self, handler):
|
|
3076
|
+
optional_fields = set(["thought_signature"])
|
|
3077
|
+
serialized = handler(self)
|
|
3078
|
+
m = {}
|
|
3079
|
+
|
|
3080
|
+
for n, f in type(self).model_fields.items():
|
|
3081
|
+
k = f.alias or n
|
|
3082
|
+
val = serialized.get(k)
|
|
3083
|
+
|
|
3084
|
+
if val != UNSET_SENTINEL:
|
|
3085
|
+
if val is not None or k not in optional_fields:
|
|
3086
|
+
m[k] = val
|
|
3087
|
+
|
|
3088
|
+
return m
|
|
3089
|
+
|
|
3090
|
+
|
|
3091
|
+
class CreatePromptMessagesPromptsAssistantMessageTypedDict(TypedDict):
|
|
3092
|
+
role: CreatePromptMessagesPromptsResponse200ApplicationJSONRole
|
|
3093
|
+
r"""The role of the messages author, in this case `assistant`."""
|
|
3094
|
+
content: NotRequired[
|
|
3095
|
+
Nullable[CreatePromptMessagesPromptsResponse200ApplicationJSONContentTypedDict]
|
|
3096
|
+
]
|
|
3097
|
+
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
3098
|
+
refusal: NotRequired[Nullable[str]]
|
|
3099
|
+
r"""The refusal message by the assistant."""
|
|
3100
|
+
name: NotRequired[str]
|
|
3101
|
+
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
3102
|
+
audio: NotRequired[Nullable[CreatePromptMessagesPromptsAudioTypedDict]]
|
|
3103
|
+
r"""Data about a previous audio response from the model."""
|
|
3104
|
+
tool_calls: NotRequired[List[CreatePromptMessagesPromptsToolCallsTypedDict]]
|
|
3105
|
+
r"""The tool calls generated by the model, such as function calls."""
|
|
3106
|
+
|
|
3107
|
+
|
|
3108
|
+
class CreatePromptMessagesPromptsAssistantMessage(BaseModel):
|
|
3109
|
+
role: CreatePromptMessagesPromptsResponse200ApplicationJSONRole
|
|
3110
|
+
r"""The role of the messages author, in this case `assistant`."""
|
|
3111
|
+
|
|
3112
|
+
content: OptionalNullable[
|
|
3113
|
+
CreatePromptMessagesPromptsResponse200ApplicationJSONContent
|
|
3114
|
+
] = UNSET
|
|
3115
|
+
r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
|
|
3116
|
+
|
|
3117
|
+
refusal: OptionalNullable[str] = UNSET
|
|
3118
|
+
r"""The refusal message by the assistant."""
|
|
3119
|
+
|
|
3120
|
+
name: Optional[str] = None
|
|
3121
|
+
r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
|
|
3122
|
+
|
|
3123
|
+
audio: OptionalNullable[CreatePromptMessagesPromptsAudio] = UNSET
|
|
3124
|
+
r"""Data about a previous audio response from the model."""
|
|
3125
|
+
|
|
3126
|
+
tool_calls: Optional[List[CreatePromptMessagesPromptsToolCalls]] = None
|
|
3127
|
+
r"""The tool calls generated by the model, such as function calls."""
|
|
3128
|
+
|
|
3129
|
+
@model_serializer(mode="wrap")
|
|
3130
|
+
def serialize_model(self, handler):
|
|
3131
|
+
optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
|
|
3132
|
+
nullable_fields = set(["content", "refusal", "audio"])
|
|
3133
|
+
serialized = handler(self)
|
|
3134
|
+
m = {}
|
|
3135
|
+
|
|
3136
|
+
for n, f in type(self).model_fields.items():
|
|
3137
|
+
k = f.alias or n
|
|
3138
|
+
val = serialized.get(k)
|
|
3139
|
+
is_nullable_and_explicitly_set = (
|
|
3140
|
+
k in nullable_fields
|
|
3141
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
3142
|
+
)
|
|
3143
|
+
|
|
3144
|
+
if val != UNSET_SENTINEL:
|
|
3145
|
+
if (
|
|
3146
|
+
val is not None
|
|
3147
|
+
or k not in optional_fields
|
|
3148
|
+
or is_nullable_and_explicitly_set
|
|
3149
|
+
):
|
|
3150
|
+
m[k] = val
|
|
3151
|
+
|
|
3152
|
+
return m
|
|
3153
|
+
|
|
3154
|
+
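Illustrative usage sketch (assumptions: the classes above are exported from orq_ai_sdk.models, and the nested function object follows the usual OpenAI tool-call shape with `name` and `arguments` fields):

    from orq_ai_sdk.models import (
        CreatePromptMessagesPromptsAssistantMessage,
        CreatePromptMessagesPromptsToolCalls,
    )

    # An assistant turn that carries one function-style tool call; the id and
    # arguments below are placeholder values.
    tool_call = CreatePromptMessagesPromptsToolCalls(
        id="call_123",
        type="function",
        function={"name": "get_weather", "arguments": '{"city": "Berlin"}'},
        # thought_signature is only needed when continuing a Gemini 3 tool-call turn.
    )

    message = CreatePromptMessagesPromptsAssistantMessage(
        role="assistant",
        content=None,  # content may be omitted when tool_calls is present
        tool_calls=[tool_call],
    )

    # The wrap serializer above drops optional fields that were never set, so
    # `refusal` and `audio` do not appear in the dumped payload.
    print(message.model_dump())
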
+CreatePromptMessagesPromptsResponse200Role = Literal["user",]
+r"""The role of the messages author, in this case `user`."""
+
+
+CreatePrompt2PromptsResponse200ApplicationJSONResponseBodyType = Literal["file",]
+r"""The type of the content part. Always `file`."""
+
+
+CreatePrompt2PromptsResponse200ApplicationJSONResponseBodyPromptType = Literal[
+    "ephemeral",
+]
+r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+
+CreatePrompt2PromptsTTL = Literal[
+    "5m",
+    "1h",
+]
+r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+- `5m`: 5 minutes
+- `1h`: 1 hour
+
+Defaults to `5m`. Only supported by `Anthropic` Claude models.
+"""
+
+
+class CreatePrompt2PromptsCacheControlTypedDict(TypedDict):
+    type: CreatePrompt2PromptsResponse200ApplicationJSONResponseBodyPromptType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+    ttl: NotRequired[CreatePrompt2PromptsTTL]
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+
+class CreatePrompt2PromptsCacheControl(BaseModel):
+    type: CreatePrompt2PromptsResponse200ApplicationJSONResponseBodyPromptType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+    ttl: Optional[CreatePrompt2PromptsTTL] = "5m"
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class CreatePrompt2Prompts4TypedDict(TypedDict):
+    type: CreatePrompt2PromptsResponse200ApplicationJSONResponseBodyType
+    r"""The type of the content part. Always `file`."""
+    file: FileContentPartSchemaTypedDict
+    r"""File data for the content part. Must contain either file_data or uri, but not both."""
+    cache_control: NotRequired[CreatePrompt2PromptsCacheControlTypedDict]
+
+
+class CreatePrompt2Prompts4(BaseModel):
+    type: CreatePrompt2PromptsResponse200ApplicationJSONResponseBodyType
+    r"""The type of the content part. Always `file`."""
+
+    file: FileContentPartSchema
+    r"""File data for the content part. Must contain either file_data or uri, but not both."""
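Illustrative wire shape for the new `file` content part with an Anthropic cache-control breakpoint (the nested file keys follow the docstrings above; treat the exact payload as an assumption rather than SDK documentation):

    from orq_ai_sdk.models import CreatePrompt2Prompts4

    part = CreatePrompt2Prompts4.model_validate(
        {
            "type": "file",
            # Either file_data (a data URI) or uri may be supplied, but not both.
            "file": {"file_data": "data:application/pdf;base64,JVBERi0xLjQK..."},
            # Optional: cache this block for 1 hour instead of the default 5 minutes.
            "cache_control": {"type": "ephemeral", "ttl": "1h"},
        }
    )
    print(part.cache_control.ttl)  # "1h"
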
 
-
-        Optional[CreatePromptPromptsThinkingLevel],
-        pydantic.Field(alias="thinkingLevel"),
-    ] = None
-    r"""The level of thinking to use for the model. Only supported by `Google AI`"""
+    cache_control: Optional[CreatePrompt2PromptsCacheControl] = None
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "temperature",
-            "maxTokens",
-            "topK",
-            "topP",
-            "frequencyPenalty",
-            "presencePenalty",
-            "numImages",
-            "seed",
-            "format",
-            "dimensions",
-            "quality",
-            "style",
-            "responseFormat",
-            "photoRealVersion",
-            "encoding_format",
-            "reasoningEffort",
-            "budgetTokens",
-            "verbosity",
-            "thinkingLevel",
-        ]
-        nullable_fields = ["responseFormat"]
-        null_default_fields = []
-
+        optional_fields = set(["cache_control"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
 
-
-
-
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
         return m
 
 
-
-    "
-
-
-
-
-
-
-
-    "togetherai",
-    "perplexity",
-    "anthropic",
-    "leonardoai",
-    "fal",
-    "nvidia",
-    "jina",
-    "elevenlabs",
-    "litellm",
-    "cerebras",
-    "openailike",
-    "bytedance",
-    "mistral",
-    "deepseek",
-    "contextualai",
-    "moonshotai",
-    "zai",
-    "slack",
-]
+CreatePromptContentPromptsResponse2002TypedDict = TypeAliasType(
+    "CreatePromptContentPromptsResponse2002TypedDict",
+    Union[
+        AudioContentPartSchemaTypedDict,
+        TextContentPartSchemaTypedDict,
+        ImageContentPartSchemaTypedDict,
+        CreatePrompt2Prompts4TypedDict,
+    ],
+)
 
 
-
-
-
-
-
-
-
-    "
-    "expected_output",
+CreatePromptContentPromptsResponse2002 = Annotated[
+    Union[
+        Annotated[TextContentPartSchema, Tag("text")],
+        Annotated[ImageContentPartSchema, Tag("image_url")],
+        Annotated[AudioContentPartSchema, Tag("input_audio")],
+        Annotated[CreatePrompt2Prompts4, Tag("file")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
-r"""The role of the prompt message"""
-
-
-CreatePrompt2PromptsResponse200ApplicationJSONType = Literal["file",]
-r"""The type of the content part. Always `file`."""
 
 
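The content-part union above routes plain dicts by their `type` key via the callable discriminator. A minimal sketch, assuming the alias is exported and that TextContentPartSchema uses the conventional `type`/`text` fields:

    from pydantic import TypeAdapter

    from orq_ai_sdk.models import CreatePromptContentPromptsResponse2002

    adapter = TypeAdapter(CreatePromptContentPromptsResponse2002)

    # "type": "text" selects TextContentPartSchema; "type": "file" would select
    # CreatePrompt2Prompts4, and likewise for image_url / input_audio parts.
    part = adapter.validate_python({"type": "text", "text": "Summarize the attached file."})
    print(type(part).__name__)
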
-
-
-
-
-
-    mime_type: NotRequired[str]
-    r"""MIME type of the file (e.g., application/pdf, image/png)"""
-    filename: NotRequired[str]
-    r"""The name of the file, used when passing the file to the model as a string."""
-
-
-class CreatePrompt2PromptsFile(BaseModel):
-    file_data: Optional[str] = None
-    r"""The file data as a data URI string in the format 'data:<mime-type>;base64,<base64-encoded-data>'. Example: 'data:image/png;base64,iVBORw0KGgoAAAANS...'"""
-
-    uri: Optional[str] = None
-    r"""URL to the file. Only supported by Anthropic Claude models for PDF files."""
-
-    mime_type: Annotated[Optional[str], pydantic.Field(alias="mimeType")] = None
-    r"""MIME type of the file (e.g., application/pdf, image/png)"""
+CreatePromptMessagesPromptsResponse200ContentTypedDict = TypeAliasType(
+    "CreatePromptMessagesPromptsResponse200ContentTypedDict",
+    Union[str, List[CreatePromptContentPromptsResponse2002TypedDict]],
+)
+r"""The contents of the user message."""
 
-    filename: Optional[str] = None
-    r"""The name of the file, used when passing the file to the model as a string."""
 
+CreatePromptMessagesPromptsResponse200Content = TypeAliasType(
+    "CreatePromptMessagesPromptsResponse200Content",
+    Union[str, List[CreatePromptContentPromptsResponse2002]],
+)
+r"""The contents of the user message."""
 
-class CreatePrompt23TypedDict(TypedDict):
-    type: CreatePrompt2PromptsResponse200ApplicationJSONType
-    r"""The type of the content part. Always `file`."""
-    file: CreatePrompt2PromptsFileTypedDict
 
+class CreatePromptMessagesPromptsUserMessageTypedDict(TypedDict):
+    role: CreatePromptMessagesPromptsResponse200Role
+    r"""The role of the messages author, in this case `user`."""
+    content: CreatePromptMessagesPromptsResponse200ContentTypedDict
+    r"""The contents of the user message."""
+    name: NotRequired[str]
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
 
-class CreatePrompt23(BaseModel):
-    type: CreatePrompt2PromptsResponse200ApplicationJSONType
-    r"""The type of the content part. Always `file`."""
 
-
+class CreatePromptMessagesPromptsUserMessage(BaseModel):
+    role: CreatePromptMessagesPromptsResponse200Role
+    r"""The role of the messages author, in this case `user`."""
 
+    content: CreatePromptMessagesPromptsResponse200Content
+    r"""The contents of the user message."""
 
-
+    name: Optional[str] = None
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}
 
-
-
-
-    id: NotRequired[str]
-    r"""The orq.ai id of the image"""
-    detail: NotRequired[str]
-    r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
 
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
-
-    url: str
-    r"""Either a URL of the image or the base64 encoded data URI."""
+        return m
 
-    id: Optional[str] = None
-    r"""The orq.ai id of the image"""
 
-
-
+CreatePromptMessagesPromptsResponseRole = Literal["system",]
+r"""The role of the messages author, in this case `system`."""
 
 
-
-
+CreatePromptMessagesPromptsResponseContentTypedDict = TypeAliasType(
+    "CreatePromptMessagesPromptsResponseContentTypedDict",
+    Union[str, List[TextContentPartSchemaTypedDict]],
+)
+r"""The contents of the system message."""
 
-    type: CreatePrompt2PromptsResponse200Type
-    image_url: CreatePrompt2PromptsImageURLTypedDict
 
+CreatePromptMessagesPromptsResponseContent = TypeAliasType(
+    "CreatePromptMessagesPromptsResponseContent",
+    Union[str, List[TextContentPartSchema]],
+)
+r"""The contents of the system message."""
 
-class CreatePrompt2Prompts2(BaseModel):
-    r"""The image part of the prompt message. Only supported with vision models."""
 
-
+class CreatePromptMessagesPromptsSystemMessageTypedDict(TypedDict):
+    r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
 
-
+    role: CreatePromptMessagesPromptsResponseRole
+    r"""The role of the messages author, in this case `system`."""
+    content: CreatePromptMessagesPromptsResponseContentTypedDict
+    r"""The contents of the system message."""
+    name: NotRequired[str]
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
 
 
-
+class CreatePromptMessagesPromptsSystemMessage(BaseModel):
+    r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
 
+    role: CreatePromptMessagesPromptsResponseRole
+    r"""The role of the messages author, in this case `system`."""
 
-
-    r"""
+    content: CreatePromptMessagesPromptsResponseContent
+    r"""The contents of the system message."""
 
-
-
+    name: Optional[str] = None
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}
 
-
-
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
 
-
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
-
+        return m
 
 
-
-    "
+CreatePromptPromptsResponseMessagesTypedDict = TypeAliasType(
+    "CreatePromptPromptsResponseMessagesTypedDict",
     Union[
-
-
-
+        CreatePromptMessagesPromptsSystemMessageTypedDict,
+        CreatePromptMessagesPromptsUserMessageTypedDict,
+        CreatePromptMessagesPromptsToolMessageTypedDict,
+        CreatePromptMessagesPromptsAssistantMessageTypedDict,
     ],
 )
 
 
-
+CreatePromptPromptsResponseMessages = Annotated[
     Union[
-        Annotated[
-        Annotated[
-        Annotated[
+        Annotated[CreatePromptMessagesPromptsSystemMessage, Tag("system")],
+        Annotated[CreatePromptMessagesPromptsUserMessage, Tag("user")],
+        Annotated[CreatePromptMessagesPromptsAssistantMessage, Tag("assistant")],
+        Annotated[CreatePromptMessagesPromptsToolMessage, Tag("tool")],
     ],
-    Discriminator(lambda m: get_discriminator(m, "
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
 ]
 
 
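The new messages union is discriminated on the message role. A short sketch building a conversation from the typed message models introduced above (import path assumed):

    from orq_ai_sdk.models import (
        CreatePromptMessagesPromptsSystemMessage,
        CreatePromptMessagesPromptsUserMessage,
    )

    messages = [
        CreatePromptMessagesPromptsSystemMessage(
            role="system",
            content="You are a terse assistant.",
        ),
        CreatePromptMessagesPromptsUserMessage(
            role="user",
            content="What changed in createpromptop.py?",
        ),
    ]

    # Each entry serializes to the familiar {"role": ..., "content": ...} shape;
    # the optional `name` field is dropped because it was never set.
    print([m.model_dump() for m in messages])
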
-
-    "
-    Union[str, List[CreatePromptContentPromptsResponse2TypedDict]],
-)
-r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
-
-
-CreatePromptPromptsContent = TypeAliasType(
-    "CreatePromptPromptsContent", Union[str, List[CreatePromptContentPromptsResponse2]]
-)
-r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
-
-
-CreatePromptPromptsResponseType = Literal["function",]
+class PromptFieldTypedDict(TypedDict):
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
 
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+    audio: NotRequired[Nullable[CreatePromptPromptsAudioTypedDict]]
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+    frequency_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+    max_tokens: NotRequired[Nullable[int]]
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
 
-
-
-
-    r"""
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+    max_completion_tokens: NotRequired[Nullable[int]]
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+    logprobs: NotRequired[Nullable[bool]]
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+    top_logprobs: NotRequired[Nullable[int]]
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+    n: NotRequired[Nullable[int]]
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+    presence_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+    response_format: NotRequired[CreatePromptPromptsResponseFormatTypedDict]
+    r"""An object specifying the format that the model must output"""
+    reasoning_effort: NotRequired[CreatePromptPromptsReasoningEffort]
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
 
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
 
-
-
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+    verbosity: NotRequired[str]
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+    seed: NotRequired[Nullable[float]]
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+    stop: NotRequired[Nullable[CreatePromptPromptsStopTypedDict]]
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+    stream_options: NotRequired[Nullable[CreatePromptPromptsStreamOptionsTypedDict]]
+    r"""Options for streaming response. Only set this when you set stream: true."""
+    thinking: NotRequired[CreatePromptPromptsThinkingTypedDict]
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+    top_p: NotRequired[Nullable[float]]
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+    top_k: NotRequired[Nullable[float]]
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+    tool_choice: NotRequired[CreatePromptPromptsToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model."""
+    parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use."""
+    modalities: NotRequired[Nullable[List[CreatePromptPromptsModalities]]]
+    r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+    guardrails: NotRequired[List[CreatePromptPromptsGuardrailsTypedDict]]
+    r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[CreatePromptPromptsFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[CreatePromptPromptsRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[CreatePromptPromptsCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[CreatePromptPromptsLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[CreatePromptPromptsTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+    messages: NotRequired[List[CreatePromptPromptsResponseMessagesTypedDict]]
+    r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+    model: NotRequired[Nullable[str]]
+    r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
+    version: NotRequired[str]
 
-    arguments: str
-    r"""JSON string arguments for the functions"""
 
+class PromptField(BaseModel):
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
 
-
-
-    function: CreatePromptPromptsFunctionTypedDict
-    id: NotRequired[str]
-    index: NotRequired[float]
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
 
+    audio: OptionalNullable[CreatePromptPromptsAudio] = UNSET
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
 
-
-
+    frequency_penalty: OptionalNullable[float] = UNSET
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
 
-
+    max_tokens: OptionalNullable[int] = UNSET
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
 
-
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
 
-
+    max_completion_tokens: OptionalNullable[int] = UNSET
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
 
+    logprobs: OptionalNullable[bool] = UNSET
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
 
-
-
-    r"""The role of the prompt message"""
-    content: Nullable[CreatePromptPromptsContentTypedDict]
-    r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
-    tool_calls: NotRequired[List[CreatePromptPromptsToolCallsTypedDict]]
-    tool_call_id: NotRequired[Nullable[str]]
+    top_logprobs: OptionalNullable[int] = UNSET
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
 
+    n: OptionalNullable[int] = UNSET
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
 
-
-
-    r"""The role of the prompt message"""
+    presence_penalty: OptionalNullable[float] = UNSET
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
 
-
-    r"""
+    response_format: Optional[CreatePromptPromptsResponseFormat] = None
+    r"""An object specifying the format that the model must output"""
 
-
+    reasoning_effort: Optional[CreatePromptPromptsReasoningEffort] = None
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
 
-
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
 
-
-
-        optional_fields = ["tool_calls", "tool_call_id"]
-        nullable_fields = ["content", "tool_call_id"]
-        null_default_fields = []
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
 
-
+    verbosity: Optional[str] = None
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
 
-
+    seed: OptionalNullable[float] = UNSET
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
 
-
-
-            val = serialized.get(k)
-            serialized.pop(k, None)
+    stop: OptionalNullable[CreatePromptPromptsStop] = UNSET
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
 
-
-
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
+    stream_options: OptionalNullable[CreatePromptPromptsStreamOptions] = UNSET
+    r"""Options for streaming response. Only set this when you set stream: true."""
 
-
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+    thinking: Optional[CreatePromptPromptsThinking] = None
 
-
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
 
+    top_p: OptionalNullable[float] = UNSET
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
 
-
-    r"""
+    top_k: OptionalNullable[float] = UNSET
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
 
-
-
-    model: NotRequired[str]
-    model_db_id: NotRequired[Nullable[str]]
-    r"""The id of the resource"""
-    model_type: NotRequired[Nullable[CreatePromptModelType]]
-    r"""The modality of the model"""
-    model_parameters: NotRequired[CreatePromptModelParametersTypedDict]
-    r"""Model Parameters: Not all parameters apply to every model"""
-    provider: NotRequired[CreatePromptProvider]
-    integration_id: NotRequired[Nullable[str]]
-    r"""The ID of the integration to use"""
-    version: NotRequired[str]
+    tool_choice: Optional[CreatePromptPromptsToolChoice] = None
+    r"""Controls which (if any) tool is called by the model."""
 
+    parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use."""
 
-
-    r"""
+    modalities: OptionalNullable[List[CreatePromptPromptsModalities]] = UNSET
+    r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
 
-
+    guardrails: Optional[List[CreatePromptPromptsGuardrails]] = None
+    r"""A list of guardrails to apply to the request."""
 
-
+    fallbacks: Optional[List[CreatePromptPromptsFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
 
-
+    retry: Optional[CreatePromptPromptsRetry] = None
+    r"""Retry configuration for the request"""
 
-
-    r"""
+    cache: Optional[CreatePromptPromptsCache] = None
+    r"""Cache configuration for the request."""
 
-
-    r"""
+    load_balancer: Optional[CreatePromptPromptsLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
 
-
-    r"""
+    timeout: Optional[CreatePromptPromptsTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
-
+    messages: Optional[List[CreatePromptPromptsResponseMessages]] = None
+    r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
 
-
-    r"""
+    model: OptionalNullable[str] = UNSET
+    r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
 
     version: Optional[str] = None
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+                "messages",
+                "model",
+                "version",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+                "model",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
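PromptField is the new top-level prompt payload that replaces prompt_config. A minimal sketch using only fields shown in this hunk; the reasoning_effort value comes from the docstring above and the messages entry reuses a model defined earlier in the file (import path assumed):

    from orq_ai_sdk.models import (
        CreatePromptMessagesPromptsUserMessage,
        PromptField,
    )

    prompt = PromptField(
        model="openai/gpt-4o",
        messages=[CreatePromptMessagesPromptsUserMessage(role="user", content="Hello!")],
        temperature=0.2,
        reasoning_effort="low",
    )

    # Optional parameters that were never set (top_p, seed, guardrails, ...) are
    # omitted by the wrap serializer, so the dumped payload stays minimal.
    print(prompt.model_dump())
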
@@ -2220,31 +3720,26 @@ class CreatePromptPromptsMetadata(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["use_cases", "language"]
-        nullable_fields = ["language"]
-        null_default_fields = []
-
+        optional_fields = set(["use_cases", "language"])
+        nullable_fields = set(["language"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
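The rewritten serializer distinguishes a nullable field that was never set from one explicitly assigned None. A sketch against CreatePromptPromptsMetadata (its use_cases/language fields are referenced in the hunk above; exact field types are assumptions):

    from orq_ai_sdk.models import CreatePromptPromptsMetadata

    # `language` never set: it is omitted from the dump entirely.
    print(CreatePromptPromptsMetadata().model_dump())

    # `language` explicitly set to None: it is kept and emitted as null, because
    # is_nullable_and_explicitly_set is true for explicitly assigned nullable fields.
    print(CreatePromptPromptsMetadata(language=None).model_dump())
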
@@ -2260,12 +3755,14 @@ class CreatePromptPromptTypedDict(TypedDict):
     updated: str
     display_name: str
     r"""The prompt’s name, meant to be displayable in the UI."""
-
-    r"""
+    prompt: PromptFieldTypedDict
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
     created_by_id: NotRequired[Nullable[str]]
     updated_by_id: NotRequired[Nullable[str]]
     description: NotRequired[Nullable[str]]
     r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
+    prompt_config: NotRequired[PromptConfigTypedDict]
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
     metadata: NotRequired[CreatePromptPromptsMetadataTypedDict]
 
 
@@ -2287,8 +3784,8 @@ class CreatePromptPrompt(BaseModel):
     display_name: str
     r"""The prompt’s name, meant to be displayable in the UI."""
 
-
-    r"""
+    prompt: PromptField
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
 
     created_by_id: OptionalNullable[str] = UNSET
 
@@ -2297,34 +3794,45 @@ class CreatePromptPrompt(BaseModel):
     description: OptionalNullable[str] = UNSET
     r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
 
+    prompt_config: Annotated[
+        Optional[PromptConfig],
+        pydantic.Field(
+            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+        ),
+    ] = None
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
+
     metadata: Optional[CreatePromptPromptsMetadata] = None
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
+        optional_fields = set(
+            [
+                "created_by_id",
+                "updated_by_id",
+                "description",
+                "prompt_config",
+                "metadata",
+            ]
+        )
+        nullable_fields = set(["created_by_id", "updated_by_id", "description"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
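prompt_config is now flagged with pydantic.Field(deprecated=...), so reading the attribute emits a DeprecationWarning. A hypothetical, self-contained reproduction of that pattern (not SDK code):

    import warnings
    from typing import Annotated, Optional

    import pydantic

    class Example(pydantic.BaseModel):
        prompt_config: Annotated[
            Optional[str],
            pydantic.Field(deprecated="use `prompt` instead"),
        ] = None

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _ = Example(prompt_config="legacy").prompt_config  # access triggers the warning
    print(caught[0].message)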