orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/agents.py +186 -186
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5746 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1945 -383
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1381 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +407 -128
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2759 -1251
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1696 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1679 -230
- orq_ai_sdk/models/getpromptversionop.py +1676 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +805 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1690 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1462 -196
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1439 -194
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1968 -397
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2854 -1448
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +364 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
- orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
@@ -1,6 +1,30 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .audiocontentpartschema import (
+    AudioContentPartSchema,
+    AudioContentPartSchemaTypedDict,
+)
+from .filecontentpartschema import FileContentPartSchema, FileContentPartSchemaTypedDict
+from .imagecontentpartschema import (
+    ImageContentPartSchema,
+    ImageContentPartSchemaTypedDict,
+)
+from .reasoningpartschema import ReasoningPartSchema, ReasoningPartSchemaTypedDict
+from .redactedreasoningpartschema import (
+    RedactedReasoningPartSchema,
+    RedactedReasoningPartSchemaTypedDict,
+)
+from .refusalpartschema import RefusalPartSchema, RefusalPartSchemaTypedDict
+from .textcontentpartschema import TextContentPartSchema, TextContentPartSchemaTypedDict
+from .thinkingconfigdisabledschema import (
+    ThinkingConfigDisabledSchema,
+    ThinkingConfigDisabledSchemaTypedDict,
+)
+from .thinkingconfigenabledschema import (
+    ThinkingConfigEnabledSchema,
+    ThinkingConfigEnabledSchemaTypedDict,
+)
 from orq_ai_sdk.types import (
     BaseModel,
     Nullable,
@@ -17,7 +41,13 @@ from orq_ai_sdk.utils import (
 import pydantic
 from pydantic import Discriminator, Tag, model_serializer
 from typing import Any, Dict, List, Literal, Optional, Union
-from typing_extensions import
+from typing_extensions import (
+    Annotated,
+    NotRequired,
+    TypeAliasType,
+    TypedDict,
+    deprecated,
+)
 
 
 class ListPromptVersionsRequestTypedDict(TypedDict):
@@ -53,6 +83,22 @@ class ListPromptVersionsRequest(BaseModel):
     ] = None
     r"""A cursor for use in pagination. `ending_before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 20 objects, starting with `01JJ1HDHN79XAS7A01WB3HYSDB`, your subsequent call can include `before=01JJ1HDHN79XAS7A01WB3HYSDB` in order to fetch the previous page of the list."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["limit", "starting_after", "ending_before"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListPromptVersionsObject = Literal["list",]
 
@@ -65,6 +111,7 @@ ListPromptVersionsModelType = Literal[
     "tts",
     "stt",
     "rerank",
+    "ocr",
     "moderation",
     "vision",
 ]
@@ -105,39 +152,43 @@ ListPromptVersionsResponseFormat4 = Literal[
 ]
 
 
-
+ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType = (
+    Literal["text",]
+)
 
 
 class ListPromptVersionsResponseFormat3TypedDict(TypedDict):
-    type:
+    type: ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
 
 
 class ListPromptVersionsResponseFormat3(BaseModel):
-    type:
+    type: ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
 
 
-
+ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONType = Literal[
+    "json_object",
+]
 
 
 class ListPromptVersionsResponseFormat2TypedDict(TypedDict):
-    type:
+    type: ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONType
 
 
 class ListPromptVersionsResponseFormat2(BaseModel):
-    type:
+    type: ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONType
 
 
-
+ListPromptVersionsResponseFormatPromptsResponse200Type = Literal["json_schema",]
 
 
-class
+class ListPromptVersionsResponseFormatPromptsResponseJSONSchemaTypedDict(TypedDict):
     name: str
     schema_: Dict[str, Any]
     description: NotRequired[str]
     strict: NotRequired[bool]
 
 
-class
+class ListPromptVersionsResponseFormatPromptsResponseJSONSchema(BaseModel):
     name: str
 
     schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
@@ -146,23 +197,55 @@ class ListPromptVersionsResponseFormatJSONSchema(BaseModel):
 
     strict: Optional[bool] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListPromptVersionsResponseFormat1TypedDict(TypedDict):
-    type:
-    json_schema:
+    type: ListPromptVersionsResponseFormatPromptsResponse200Type
+    json_schema: ListPromptVersionsResponseFormatPromptsResponseJSONSchemaTypedDict
     display_name: NotRequired[str]
 
 
 class ListPromptVersionsResponseFormat1(BaseModel):
-    type:
+    type: ListPromptVersionsResponseFormatPromptsResponse200Type
 
-    json_schema:
+    json_schema: ListPromptVersionsResponseFormatPromptsResponseJSONSchema
 
     display_name: Optional[str] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
-
-
+        return m
+
+
+ListPromptVersionsPromptsResponseFormatTypedDict = TypeAliasType(
+    "ListPromptVersionsPromptsResponseFormatTypedDict",
     Union[
         ListPromptVersionsResponseFormat2TypedDict,
         ListPromptVersionsResponseFormat3TypedDict,
@@ -182,8 +265,8 @@ Important: when using JSON mode, you must also instruct the model to produce JSO
 """
 
 
-
-"
+ListPromptVersionsPromptsResponseFormat = TypeAliasType(
+    "ListPromptVersionsPromptsResponseFormat",
     Union[
         ListPromptVersionsResponseFormat2,
         ListPromptVersionsResponseFormat3,
@@ -217,7 +300,7 @@ ListPromptVersionsEncodingFormat = Literal[
 r"""The format to return the embeddings"""
 
 
-
+ListPromptVersionsPromptsReasoningEffort = Literal[
     "none",
     "disable",
     "minimal",
@@ -270,7 +353,9 @@ class ListPromptVersionsModelParametersTypedDict(TypedDict):
     r"""Only supported on `image` models."""
     style: NotRequired[str]
     r"""Only supported on `image` models."""
-    response_format: NotRequired[
+    response_format: NotRequired[
+        Nullable[ListPromptVersionsPromptsResponseFormatTypedDict]
+    ]
     r"""An object specifying the format that the model must output.
 
     Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema
@@ -283,7 +368,7 @@ class ListPromptVersionsModelParametersTypedDict(TypedDict):
     r"""The version of photoReal to use. Must be v1 or v2. Only available for `leonardoai` provider"""
     encoding_format: NotRequired[ListPromptVersionsEncodingFormat]
     r"""The format to return the embeddings"""
-    reasoning_effort: NotRequired[
+    reasoning_effort: NotRequired[ListPromptVersionsPromptsReasoningEffort]
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
     budget_tokens: NotRequired[float]
     r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""
@@ -339,7 +424,7 @@ class ListPromptVersionsModelParameters(BaseModel):
     r"""Only supported on `image` models."""
 
     response_format: Annotated[
-        OptionalNullable[
+        OptionalNullable[ListPromptVersionsPromptsResponseFormat],
         pydantic.Field(alias="responseFormat"),
     ] = UNSET
     r"""An object specifying the format that the model must output.
@@ -361,7 +446,7 @@ class ListPromptVersionsModelParameters(BaseModel):
     r"""The format to return the embeddings"""
 
     reasoning_effort: Annotated[
-        Optional[
+        Optional[ListPromptVersionsPromptsReasoningEffort],
         pydantic.Field(alias="reasoningEffort"),
     ] = None
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
@@ -381,51 +466,48 @@ class ListPromptVersionsModelParameters(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "temperature",
+                "maxTokens",
+                "topK",
+                "topP",
+                "frequencyPenalty",
+                "presencePenalty",
+                "numImages",
+                "seed",
+                "format",
+                "dimensions",
+                "quality",
+                "style",
+                "responseFormat",
+                "photoRealVersion",
+                "encoding_format",
+                "reasoningEffort",
+                "budgetTokens",
+                "verbosity",
+                "thinkingLevel",
+            ]
+        )
+        nullable_fields = set(["responseFormat"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -501,6 +583,22 @@ class ListPromptVersions2File(BaseModel):
     filename: Optional[str] = None
     r"""The name of the file, used when passing the file to the model as a string."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["file_data", "uri", "mimeType", "filename"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListPromptVersions23TypedDict(TypedDict):
     type: ListPromptVersions2PromptsResponseType
@@ -537,6 +635,22 @@ class ListPromptVersions2ImageURL(BaseModel):
     detail: Optional[str] = None
     r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "detail"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListPromptVersions22TypedDict(TypedDict):
     r"""The image part of the prompt message. Only supported with vision models."""
@@ -604,7 +718,7 @@ ListPromptVersionsContent = TypeAliasType(
 r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
 
 
-
+ListPromptVersionsPromptsType = Literal["function",]
 
 
 class ListPromptVersionsFunctionTypedDict(TypedDict):
@@ -621,14 +735,14 @@ class ListPromptVersionsFunction(BaseModel):
 
 
 class ListPromptVersionsToolCallsTypedDict(TypedDict):
-    type:
+    type: ListPromptVersionsPromptsType
     function: ListPromptVersionsFunctionTypedDict
     id: NotRequired[str]
     index: NotRequired[float]
 
 
 class ListPromptVersionsToolCalls(BaseModel):
-    type:
+    type: ListPromptVersionsPromptsType
 
     function: ListPromptVersionsFunction
 
@@ -636,6 +750,22 @@ class ListPromptVersionsToolCalls(BaseModel):
 
     index: Optional[float] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "index"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListPromptVersionsMessagesTypedDict(TypedDict):
     role: ListPromptVersionsRole
@@ -659,61 +789,62 @@ class ListPromptVersionsMessages(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["tool_calls", "tool_call_id"]
-        nullable_fields = ["content", "tool_call_id"]
-        null_default_fields = []
-
+        optional_fields = set(["tool_calls", "tool_call_id"])
+        nullable_fields = set(["content", "tool_call_id"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class ListPromptVersionsPromptConfigTypedDict(TypedDict):
-    r"""A list of messages compatible with the openAI schema"""
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
 
     messages: List[ListPromptVersionsMessagesTypedDict]
     stream: NotRequired[bool]
-    model: NotRequired[str]
+    model: NotRequired[Nullable[str]]
     model_db_id: NotRequired[Nullable[str]]
     r"""The id of the resource"""
     model_type: NotRequired[Nullable[ListPromptVersionsModelType]]
     r"""The modality of the model"""
     model_parameters: NotRequired[ListPromptVersionsModelParametersTypedDict]
     r"""Model Parameters: Not all parameters apply to every model"""
-    provider: NotRequired[ListPromptVersionsProvider]
+    provider: NotRequired[Nullable[ListPromptVersionsProvider]]
     integration_id: NotRequired[Nullable[str]]
     r"""The ID of the integration to use"""
     version: NotRequired[str]
 
 
+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class ListPromptVersionsPromptConfig(BaseModel):
-    r"""A list of messages compatible with the openAI schema"""
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
 
     messages: List[ListPromptVersionsMessages]
 
     stream: Optional[bool] = None
 
-    model:
+    model: OptionalNullable[str] = UNSET
 
     model_db_id: OptionalNullable[str] = UNSET
     r"""The id of the resource"""
@@ -724,7 +855,7 @@ class ListPromptVersionsPromptConfig(BaseModel):
     model_parameters: Optional[ListPromptVersionsModelParameters] = None
     r"""Model Parameters: Not all parameters apply to every model"""
 
-    provider:
+    provider: OptionalNullable[ListPromptVersionsProvider] = UNSET
 
     integration_id: OptionalNullable[str] = UNSET
     r"""The ID of the integration to use"""
@@ -733,181 +864,1524 @@ class ListPromptVersionsPromptConfig(BaseModel):
|
|
|
733
864
|
|
|
734
865
|
@model_serializer(mode="wrap")
|
|
735
866
|
def serialize_model(self, handler):
|
|
736
|
-
optional_fields =
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
867
|
+
optional_fields = set(
|
|
868
|
+
[
|
|
869
|
+
"stream",
|
|
870
|
+
"model",
|
|
871
|
+
"model_db_id",
|
|
872
|
+
"model_type",
|
|
873
|
+
"model_parameters",
|
|
874
|
+
"provider",
|
|
875
|
+
"integration_id",
|
|
876
|
+
"version",
|
|
877
|
+
]
|
|
878
|
+
)
|
|
879
|
+
nullable_fields = set(
|
|
880
|
+
["model", "model_db_id", "model_type", "provider", "integration_id"]
|
|
881
|
+
)
|
|
749
882
|
serialized = handler(self)
|
|
750
|
-
|
|
751
883
|
m = {}
|
|
752
884
|
|
|
753
885
|
for n, f in type(self).model_fields.items():
|
|
754
886
|
k = f.alias or n
|
|
755
887
|
val = serialized.get(k)
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
):
|
|
769
|
-
m[k] = val
|
|
888
|
+
is_nullable_and_explicitly_set = (
|
|
889
|
+
k in nullable_fields
|
|
890
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
891
|
+
)
|
|
892
|
+
|
|
893
|
+
if val != UNSET_SENTINEL:
|
|
894
|
+
if (
|
|
895
|
+
val is not None
|
|
896
|
+
or k not in optional_fields
|
|
897
|
+
or is_nullable_and_explicitly_set
|
|
898
|
+
):
|
|
899
|
+
m[k] = val
|
|
770
900
|
|
|
771
901
|
return m
|
|
772
902
|
|
|
773
903
|
|
|
774
|
-
|
|
775
|
-
"
|
|
776
|
-
"
|
|
777
|
-
"
|
|
778
|
-
"
|
|
779
|
-
"
|
|
780
|
-
"
|
|
781
|
-
"Code understanding",
|
|
782
|
-
"Code writing",
|
|
783
|
-
"Conversation",
|
|
784
|
-
"Documents QA",
|
|
785
|
-
"Evaluation",
|
|
786
|
-
"Extraction",
|
|
787
|
-
"Multi-modal",
|
|
788
|
-
"Self-checking",
|
|
789
|
-
"Sentiment analysis",
|
|
790
|
-
"SQL",
|
|
791
|
-
"Summarization",
|
|
792
|
-
"Tagging",
|
|
793
|
-
"Translation (document)",
|
|
794
|
-
"Translation (sentences)",
|
|
904
|
+
ListPromptVersionsVoice = Literal[
|
|
905
|
+
"alloy",
|
|
906
|
+
"echo",
|
|
907
|
+
"fable",
|
|
908
|
+
"onyx",
|
|
909
|
+
"nova",
|
|
910
|
+
"shimmer",
|
|
795
911
|
]
|
|
912
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
796
913
|
|
|
797
914
|
|
|
798
|
-
|
|
799
|
-
"
|
|
800
|
-
"
|
|
801
|
-
"
|
|
802
|
-
"
|
|
803
|
-
"
|
|
804
|
-
"Russian",
|
|
805
|
-
"Spanish",
|
|
915
|
+
ListPromptVersionsPromptsFormat = Literal[
|
|
916
|
+
"wav",
|
|
917
|
+
"mp3",
|
|
918
|
+
"flac",
|
|
919
|
+
"opus",
|
|
920
|
+
"pcm16",
|
|
806
921
|
]
|
|
807
|
-
r"""
|
|
922
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
808
923
|
|
|
809
924
|
|
|
810
|
-
class
|
|
811
|
-
|
|
812
|
-
r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
|
|
813
|
-
language: NotRequired[Nullable[ListPromptVersionsLanguage]]
|
|
814
|
-
r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
|
|
925
|
+
class ListPromptVersionsAudioTypedDict(TypedDict):
|
|
926
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
815
927
|
|
|
928
|
+
voice: ListPromptVersionsVoice
|
|
929
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
930
|
+
format_: ListPromptVersionsPromptsFormat
|
|
931
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
816
932
|
|
|
817
|
-
class ListPromptVersionsMetadata(BaseModel):
|
|
818
|
-
use_cases: Optional[List[ListPromptVersionsUseCases]] = None
|
|
819
|
-
r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
|
|
820
933
|
|
|
821
|
-
|
|
822
|
-
r"""
|
|
934
|
+
class ListPromptVersionsAudio(BaseModel):
|
|
935
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
936
|
+
|
|
937
|
+
voice: ListPromptVersionsVoice
|
|
938
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
939
|
+
|
|
940
|
+
format_: Annotated[ListPromptVersionsPromptsFormat, pydantic.Field(alias="format")]
|
|
941
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
942
|
+
|
|
943
|
+
|
|
944
|
+
ListPromptVersionsResponseFormatPromptsResponseType = Literal["json_schema",]
|
|
945
|
+
|
|
946
|
+
|
|
947
|
+
class ListPromptVersionsResponseFormatJSONSchemaTypedDict(TypedDict):
|
|
948
|
+
name: str
|
|
949
|
+
r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
|
|
950
|
+
description: NotRequired[str]
|
|
951
|
+
r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
|
|
952
|
+
schema_: NotRequired[Any]
|
|
953
|
+
r"""The schema for the response format, described as a JSON Schema object."""
|
|
954
|
+
strict: NotRequired[bool]
|
|
955
|
+
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
956
|
+
|
|
957
|
+
|
|
958
|
+
class ListPromptVersionsResponseFormatJSONSchema(BaseModel):
|
|
959
|
+
name: str
|
|
960
|
+
r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
|
|
961
|
+
|
|
962
|
+
description: Optional[str] = None
|
|
963
|
+
r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
|
|
964
|
+
|
|
965
|
+
schema_: Annotated[Optional[Any], pydantic.Field(alias="schema")] = None
|
|
966
|
+
r"""The schema for the response format, described as a JSON Schema object."""
|
|
967
|
+
|
|
968
|
+
strict: Optional[bool] = False
|
|
969
|
+
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
823
970
|
|
|
824
971
|
@model_serializer(mode="wrap")
|
|
825
972
|
def serialize_model(self, handler):
|
|
826
|
-
optional_fields = ["
|
|
827
|
-
nullable_fields = ["language"]
|
|
828
|
-
null_default_fields = []
|
|
829
|
-
|
|
973
|
+
optional_fields = set(["description", "schema", "strict"])
|
|
830
974
|
serialized = handler(self)
|
|
831
|
-
|
|
832
975
|
m = {}
|
|
833
976
|
|
|
834
977
|
for n, f in type(self).model_fields.items():
|
|
835
978
|
k = f.alias or n
|
|
836
979
|
val = serialized.get(k)
|
|
837
|
-
serialized.pop(k, None)
|
|
838
|
-
|
|
839
|
-
optional_nullable = k in optional_fields and k in nullable_fields
|
|
840
|
-
is_set = (
|
|
841
|
-
self.__pydantic_fields_set__.intersection({n})
|
|
842
|
-
or k in null_default_fields
|
|
843
|
-
) # pylint: disable=no-member
|
|
844
980
|
|
|
845
|
-
if val
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
not k in optional_fields or (optional_nullable and is_set)
|
|
849
|
-
):
|
|
850
|
-
m[k] = val
|
|
981
|
+
if val != UNSET_SENTINEL:
|
|
982
|
+
if val is not None or k not in optional_fields:
|
|
983
|
+
m[k] = val
|
|
851
984
|
|
|
852
985
|
return m
|
|
853
986
|
|
|
854
987
|
|
|
855
|
-
class
|
|
856
|
-
|
|
857
|
-
prompt_config: ListPromptVersionsPromptConfigTypedDict
|
|
858
|
-
r"""A list of messages compatible with the openAI schema"""
|
|
859
|
-
timestamp: str
|
|
860
|
-
created_by_id: NotRequired[Nullable[str]]
|
|
861
|
-
updated_by_id: NotRequired[Nullable[str]]
|
|
862
|
-
description: NotRequired[Nullable[str]]
|
|
863
|
-
r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
|
|
864
|
-
metadata: NotRequired[ListPromptVersionsMetadataTypedDict]
|
|
988
|
+
class ListPromptVersionsResponseFormatPromptsJSONSchemaTypedDict(TypedDict):
|
|
989
|
+
r"""
|
|
865
990
|
|
|
991
|
+
JSON Schema response format. Used to generate structured JSON responses
|
|
992
|
+
"""
|
|
866
993
|
|
|
867
|
-
|
|
868
|
-
|
|
994
|
+
type: ListPromptVersionsResponseFormatPromptsResponseType
|
|
995
|
+
json_schema: ListPromptVersionsResponseFormatJSONSchemaTypedDict
|
|
869
996
|
|
|
870
|
-
prompt_config: ListPromptVersionsPromptConfig
|
|
871
|
-
r"""A list of messages compatible with the openAI schema"""
|
|
872
997
|
|
|
873
|
-
|
|
998
|
+
class ListPromptVersionsResponseFormatPromptsJSONSchema(BaseModel):
|
|
999
|
+
r"""
|
|
874
1000
|
|
|
875
|
-
|
|
1001
|
+
JSON Schema response format. Used to generate structured JSON responses
|
|
1002
|
+
"""
|
|
876
1003
|
|
|
877
|
-
|
|
1004
|
+
type: ListPromptVersionsResponseFormatPromptsResponseType
|
|
878
1005
|
|
|
879
|
-
|
|
880
|
-
r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
|
|
1006
|
+
json_schema: ListPromptVersionsResponseFormatJSONSchema
|
|
881
1007
|
|
|
882
|
-
metadata: Optional[ListPromptVersionsMetadata] = None
|
|
883
1008
|
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
1009
|
+
ListPromptVersionsResponseFormatPromptsType = Literal["json_object",]
|
|
1010
|
+
|
|
1011
|
+
|
|
1012
|
+
class ListPromptVersionsResponseFormatJSONObjectTypedDict(TypedDict):
|
|
1013
|
+
r"""
|
|
1014
|
+
|
|
1015
|
+
JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
|
|
1016
|
+
"""
|
|
1017
|
+
|
|
1018
|
+
type: ListPromptVersionsResponseFormatPromptsType
|
|
1019
|
+
|
|
1020
|
+
|
|
1021
|
+
class ListPromptVersionsResponseFormatJSONObject(BaseModel):
|
|
1022
|
+
r"""
|
|
1023
|
+
|
|
1024
|
+
JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
|
|
1025
|
+
"""
|
|
1026
|
+
|
|
1027
|
+
type: ListPromptVersionsResponseFormatPromptsType
|
|
1028
|
+
|
|
1029
|
+
|
|
1030
|
+
ListPromptVersionsResponseFormatType = Literal["text",]
|
|
1031
|
+
|
|
1032
|
+
|
|
1033
|
+
class ListPromptVersionsResponseFormatTextTypedDict(TypedDict):
|
|
1034
|
+
r"""
|
|
1035
|
+
|
|
1036
|
+
Default response format. Used to generate text responses
|
|
1037
|
+
"""
|
|
1038
|
+
|
|
1039
|
+
type: ListPromptVersionsResponseFormatType
|
|
889
1040
|
|
|
890
|
-
serialized = handler(self)
|
|
891
1041
|
|
|
1042
|
+
class ListPromptVersionsResponseFormatText(BaseModel):
|
|
1043
|
+
r"""
|
|
1044
|
+
|
|
1045
|
+
Default response format. Used to generate text responses
|
|
1046
|
+
"""
|
|
1047
|
+
|
|
1048
|
+
type: ListPromptVersionsResponseFormatType
|
|
1049
|
+
|
|
1050
|
+
|
|
1051
|
+
ListPromptVersionsResponseFormatTypedDict = TypeAliasType(
|
|
1052
|
+
"ListPromptVersionsResponseFormatTypedDict",
|
|
1053
|
+
Union[
|
|
1054
|
+
ListPromptVersionsResponseFormatTextTypedDict,
|
|
1055
|
+
ListPromptVersionsResponseFormatJSONObjectTypedDict,
|
|
1056
|
+
ListPromptVersionsResponseFormatPromptsJSONSchemaTypedDict,
|
|
1057
|
+
],
|
|
1058
|
+
)
|
|
1059
|
+
r"""An object specifying the format that the model must output"""
|
|
1060
|
+
|
|
1061
|
+
|
|
1062
|
+
ListPromptVersionsResponseFormat = Annotated[
|
|
1063
|
+
Union[
|
|
1064
|
+
Annotated[ListPromptVersionsResponseFormatText, Tag("text")],
|
|
1065
|
+
Annotated[ListPromptVersionsResponseFormatJSONObject, Tag("json_object")],
|
|
1066
|
+
Annotated[
|
|
1067
|
+
ListPromptVersionsResponseFormatPromptsJSONSchema, Tag("json_schema")
|
|
1068
|
+
],
|
|
1069
|
+
],
|
|
1070
|
+
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
1071
|
+
]
|
|
1072
|
+
r"""An object specifying the format that the model must output"""
|
|
1073
|
+
|
|
1074
|
+
|
|
1075
|
+
ListPromptVersionsReasoningEffort = Literal[
|
|
1076
|
+
"none",
|
|
1077
|
+
"minimal",
|
|
1078
|
+
"low",
|
|
1079
|
+
"medium",
|
|
1080
|
+
"high",
|
|
1081
|
+
"xhigh",
|
|
1082
|
+
]
|
|
1083
|
+
r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
|
|
1084
|
+
|
|
1085
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
|
|
1086
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
|
|
1087
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1088
|
+
- `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1089
|
+
|
|
1090
|
+
Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
|
|
1091
|
+
"""
|
|
1092
|
+
|
|
1093
|
+
|
|
1094
|
+
ListPromptVersionsStopTypedDict = TypeAliasType(
|
|
1095
|
+
"ListPromptVersionsStopTypedDict", Union[str, List[str]]
|
|
1096
|
+
)
|
|
1097
|
+
r"""Up to 4 sequences where the API will stop generating further tokens."""
|
|
1098
|
+
|
|
1099
|
+
|
|
1100
|
+
ListPromptVersionsStop = TypeAliasType("ListPromptVersionsStop", Union[str, List[str]])
|
|
1101
|
+
r"""Up to 4 sequences where the API will stop generating further tokens."""
|
|
1102
|
+
|
|
1103
|
+
|
|
1104
|
+
class ListPromptVersionsStreamOptionsTypedDict(TypedDict):
|
|
1105
|
+
r"""Options for streaming response. Only set this when you set stream: true."""
|
|
1106
|
+
|
|
1107
|
+
include_usage: NotRequired[bool]
|
|
1108
|
+
r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
|
|
1109
|
+
|
|
1110
|
+
|
|
1111
|
+
class ListPromptVersionsStreamOptions(BaseModel):
|
|
1112
|
+
r"""Options for streaming response. Only set this when you set stream: true."""
|
|
1113
|
+
|
|
1114
|
+
include_usage: Optional[bool] = None
|
|
1115
|
+
r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
|
|
1116
|
+
|
|
1117
|
+
@model_serializer(mode="wrap")
|
|
1118
|
+
def serialize_model(self, handler):
|
|
1119
|
+
optional_fields = set(["include_usage"])
|
|
1120
|
+
serialized = handler(self)
|
|
1121
|
+
m = {}
|
|
1122
|
+
|
|
1123
|
+
for n, f in type(self).model_fields.items():
|
|
1124
|
+
k = f.alias or n
|
|
1125
|
+
val = serialized.get(k)
|
|
1126
|
+
|
|
1127
|
+
if val != UNSET_SENTINEL:
|
|
1128
|
+
if val is not None or k not in optional_fields:
|
|
1129
|
+
m[k] = val
|
|
1130
|
+
|
|
1131
|
+
return m
|
|
1132
|
+
|
|
1133
|
+
|
|
1134
|
+
ListPromptVersionsThinkingTypedDict = TypeAliasType(
|
|
1135
|
+
"ListPromptVersionsThinkingTypedDict",
|
|
1136
|
+
Union[ThinkingConfigDisabledSchemaTypedDict, ThinkingConfigEnabledSchemaTypedDict],
|
|
1137
|
+
)
|
|
1138
|
+
|
|
1139
|
+
|
|
1140
|
+
ListPromptVersionsThinking = Annotated[
|
|
1141
|
+
Union[
|
|
1142
|
+
Annotated[ThinkingConfigDisabledSchema, Tag("disabled")],
|
|
1143
|
+
Annotated[ThinkingConfigEnabledSchema, Tag("enabled")],
|
|
1144
|
+
],
|
|
1145
|
+
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
1146
|
+
]
|
|
1147
|
+
|
|
1148
|
+
|
|
1149
|
+
ListPromptVersionsToolChoiceType = Literal["function",]
|
|
1150
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
1151
|
+
|
|
1152
|
+
|
|
1153
|
+
class ListPromptVersionsToolChoiceFunctionTypedDict(TypedDict):
|
|
1154
|
+
name: str
|
|
1155
|
+
r"""The name of the function to call."""
|
|
1156
|
+
|
|
1157
|
+
|
|
1158
|
+
class ListPromptVersionsToolChoiceFunction(BaseModel):
|
|
1159
|
+
name: str
|
|
1160
|
+
r"""The name of the function to call."""
|
|
1161
|
+
|
|
1162
|
+
|
|
1163
|
+
class ListPromptVersionsToolChoice2TypedDict(TypedDict):
|
|
1164
|
+
function: ListPromptVersionsToolChoiceFunctionTypedDict
|
|
1165
|
+
type: NotRequired[ListPromptVersionsToolChoiceType]
|
|
1166
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
1167
|
+
|
|
1168
|
+
|
|
1169
|
+
class ListPromptVersionsToolChoice2(BaseModel):
|
|
1170
|
+
function: ListPromptVersionsToolChoiceFunction
|
|
1171
|
+
|
|
1172
|
+
type: Optional[ListPromptVersionsToolChoiceType] = None
|
|
1173
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
1174
|
+
|
|
1175
|
+
@model_serializer(mode="wrap")
|
|
1176
|
+
def serialize_model(self, handler):
|
|
1177
|
+
optional_fields = set(["type"])
|
|
1178
|
+
serialized = handler(self)
|
|
1179
|
+
m = {}
|
|
1180
|
+
|
|
1181
|
+
for n, f in type(self).model_fields.items():
|
|
1182
|
+
k = f.alias or n
|
|
1183
|
+
val = serialized.get(k)
|
|
1184
|
+
|
|
1185
|
+
if val != UNSET_SENTINEL:
|
|
1186
|
+
if val is not None or k not in optional_fields:
|
|
1187
|
+
m[k] = val
|
|
1188
|
+
|
|
1189
|
+
return m
|
|
1190
|
+
|
|
1191
|
+
|
|
1192
|
+
ListPromptVersionsToolChoice1 = Literal[
|
|
1193
|
+
"none",
|
|
1194
|
+
"auto",
|
|
1195
|
+
"required",
|
|
1196
|
+
]
|
|
1197
|
+
|
|
1198
|
+
|
|
1199
|
+
ListPromptVersionsToolChoiceTypedDict = TypeAliasType(
|
|
1200
|
+
"ListPromptVersionsToolChoiceTypedDict",
|
|
1201
|
+
Union[ListPromptVersionsToolChoice2TypedDict, ListPromptVersionsToolChoice1],
|
|
1202
|
+
)
|
|
1203
|
+
r"""Controls which (if any) tool is called by the model."""
|
|
1204
|
+
|
|
1205
|
+
|
|
1206
|
+
ListPromptVersionsToolChoice = TypeAliasType(
|
|
1207
|
+
"ListPromptVersionsToolChoice",
|
|
1208
|
+
Union[ListPromptVersionsToolChoice2, ListPromptVersionsToolChoice1],
|
|
1209
|
+
)
|
|
1210
|
+
r"""Controls which (if any) tool is called by the model."""
|
|
1211
|
+
|
|
1212
|
+
|
|
1213
|
+
ListPromptVersionsModalities = Literal[
|
|
1214
|
+
"text",
|
|
1215
|
+
"audio",
|
|
1216
|
+
]
|
|
1217
|
+
|
|
1218
|
+
|
|
1219
|
+
ListPromptVersionsID1 = Literal[
|
|
1220
|
+
"orq_pii_detection",
|
|
1221
|
+
"orq_sexual_moderation",
|
|
1222
|
+
"orq_harmful_moderation",
|
|
1223
|
+
]
|
|
1224
|
+
r"""The key of the guardrail."""
|
|
1225
|
+
|
|
1226
|
+
|
|
1227
|
+
ListPromptVersionsIDTypedDict = TypeAliasType(
|
|
1228
|
+
"ListPromptVersionsIDTypedDict", Union[ListPromptVersionsID1, str]
|
|
1229
|
+
)
|
|
1230
|
+
|
|
1231
|
+
|
|
1232
|
+
ListPromptVersionsID = TypeAliasType(
|
|
1233
|
+
"ListPromptVersionsID", Union[ListPromptVersionsID1, str]
|
|
1234
|
+
)
|
|
1235
|
+
|
|
1236
|
+
|
|
1237
|
+
ListPromptVersionsExecuteOn = Literal[
|
|
1238
|
+
"input",
|
|
1239
|
+
"output",
|
|
1240
|
+
]
|
|
1241
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
1242
|
+
|
|
1243
|
+
|
|
1244
|
+
class ListPromptVersionsGuardrailsTypedDict(TypedDict):
|
|
1245
|
+
id: ListPromptVersionsIDTypedDict
|
|
1246
|
+
execute_on: ListPromptVersionsExecuteOn
|
|
1247
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
1248
|
+
|
|
1249
|
+
|
|
1250
|
+
class ListPromptVersionsGuardrails(BaseModel):
|
|
1251
|
+
id: ListPromptVersionsID
|
|
1252
|
+
|
|
1253
|
+
execute_on: ListPromptVersionsExecuteOn
|
|
1254
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
1255
|
+
|
|
1256
|
+
|
|
1257
|
+
class ListPromptVersionsFallbacksTypedDict(TypedDict):
|
|
1258
|
+
model: str
|
|
1259
|
+
r"""Fallback model identifier"""
|
|
1260
|
+
|
|
1261
|
+
|
|
1262
|
+
class ListPromptVersionsFallbacks(BaseModel):
|
|
1263
|
+
model: str
|
|
1264
|
+
r"""Fallback model identifier"""
|
|
1265
|
+
|
|
1266
|
+
|
|
1267
|
+
class ListPromptVersionsRetryTypedDict(TypedDict):
|
|
1268
|
+
r"""Retry configuration for the request"""
|
|
1269
|
+
|
|
1270
|
+
count: NotRequired[float]
|
|
1271
|
+
r"""Number of retry attempts (1-5)"""
|
|
1272
|
+
on_codes: NotRequired[List[float]]
|
|
1273
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
1274
|
+
|
|
1275
|
+
|
|
1276
|
+
class ListPromptVersionsRetry(BaseModel):
|
|
1277
|
+
r"""Retry configuration for the request"""
|
|
1278
|
+
|
|
1279
|
+
count: Optional[float] = 3
|
|
1280
|
+
r"""Number of retry attempts (1-5)"""
|
|
1281
|
+
|
|
1282
|
+
on_codes: Optional[List[float]] = None
|
|
1283
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
1284
|
+
|
|
1285
|
+
@model_serializer(mode="wrap")
|
|
1286
|
+
def serialize_model(self, handler):
|
|
1287
|
+
optional_fields = set(["count", "on_codes"])
|
|
1288
|
+
serialized = handler(self)
|
|
1289
|
+
m = {}
|
|
1290
|
+
|
|
1291
|
+
for n, f in type(self).model_fields.items():
|
|
1292
|
+
k = f.alias or n
|
|
1293
|
+
val = serialized.get(k)
|
|
1294
|
+
|
|
1295
|
+
if val != UNSET_SENTINEL:
|
|
1296
|
+
if val is not None or k not in optional_fields:
|
|
1297
|
+
m[k] = val
|
|
1298
|
+
|
|
1299
|
+
return m
|
|
1300
|
+
|
|
1301
|
+
|
|
1302
|
+
ListPromptVersionsType = Literal["exact_match",]
|
|
1303
|
+
|
|
1304
|
+
|
|
1305
|
+
class ListPromptVersionsCacheTypedDict(TypedDict):
|
|
1306
|
+
r"""Cache configuration for the request."""
|
|
1307
|
+
|
|
1308
|
+
type: ListPromptVersionsType
|
|
1309
|
+
ttl: NotRequired[float]
|
|
1310
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1311
|
+
|
|
1312
|
+
|
|
1313
|
+
class ListPromptVersionsCache(BaseModel):
|
|
1314
|
+
r"""Cache configuration for the request."""
|
|
1315
|
+
|
|
1316
|
+
type: ListPromptVersionsType
|
|
1317
|
+
|
|
1318
|
+
ttl: Optional[float] = 1800
|
|
1319
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1320
|
+
|
|
1321
|
+
@model_serializer(mode="wrap")
|
|
1322
|
+
def serialize_model(self, handler):
|
|
1323
|
+
optional_fields = set(["ttl"])
|
|
1324
|
+
serialized = handler(self)
|
|
1325
|
+
m = {}
|
|
1326
|
+
|
|
1327
|
+
for n, f in type(self).model_fields.items():
|
|
1328
|
+
k = f.alias or n
|
|
1329
|
+
val = serialized.get(k)
|
|
1330
|
+
|
|
1331
|
+
if val != UNSET_SENTINEL:
|
|
1332
|
+
if val is not None or k not in optional_fields:
|
|
1333
|
+
m[k] = val
|
|
1334
|
+
|
|
1335
|
+
return m
|
|
1336
|
+
|
|
1337
|
+
|
|
1338
|
+
ListPromptVersionsLoadBalancerType = Literal["weight_based",]
|
|
1339
|
+
|
|
1340
|
+
|
|
1341
|
+
class ListPromptVersionsLoadBalancerModelsTypedDict(TypedDict):
|
|
1342
|
+
model: str
|
|
1343
|
+
r"""Model identifier for load balancing"""
|
|
1344
|
+
weight: NotRequired[float]
|
|
1345
|
+
r"""Weight assigned to this model for load balancing"""
|
|
1346
|
+
|
|
1347
|
+
|
|
1348
|
+
class ListPromptVersionsLoadBalancerModels(BaseModel):
|
|
1349
|
+
model: str
|
|
1350
|
+
r"""Model identifier for load balancing"""
|
|
1351
|
+
|
|
1352
|
+
weight: Optional[float] = 0.5
|
|
1353
|
+
r"""Weight assigned to this model for load balancing"""
|
|
1354
|
+
|
|
1355
|
+
@model_serializer(mode="wrap")
|
|
1356
|
+
def serialize_model(self, handler):
|
|
1357
|
+
optional_fields = set(["weight"])
|
|
1358
|
+
serialized = handler(self)
|
|
1359
|
+
m = {}
|
|
1360
|
+
|
|
1361
|
+
for n, f in type(self).model_fields.items():
|
|
1362
|
+
k = f.alias or n
|
|
1363
|
+
val = serialized.get(k)
|
|
1364
|
+
|
|
1365
|
+
if val != UNSET_SENTINEL:
|
|
1366
|
+
if val is not None or k not in optional_fields:
|
|
1367
|
+
m[k] = val
|
|
1368
|
+
|
|
1369
|
+
return m
|
|
1370
|
+
|
|
1371
|
+
|
|
1372
|
+
class ListPromptVersionsLoadBalancer1TypedDict(TypedDict):
|
|
1373
|
+
type: ListPromptVersionsLoadBalancerType
|
|
1374
|
+
models: List[ListPromptVersionsLoadBalancerModelsTypedDict]
|
|
1375
|
+
|
|
1376
|
+
|
|
1377
|
+
class ListPromptVersionsLoadBalancer1(BaseModel):
|
|
1378
|
+
type: ListPromptVersionsLoadBalancerType
|
|
1379
|
+
|
|
1380
|
+
models: List[ListPromptVersionsLoadBalancerModels]
|
|
1381
|
+
|
|
1382
|
+
|
|
1383
|
+
ListPromptVersionsLoadBalancerTypedDict = ListPromptVersionsLoadBalancer1TypedDict
|
|
1384
|
+
r"""Load balancer configuration for the request."""
|
|
1385
|
+
|
|
1386
|
+
|
|
1387
|
+
ListPromptVersionsLoadBalancer = ListPromptVersionsLoadBalancer1
|
|
1388
|
+
r"""Load balancer configuration for the request."""
|
|
1389
|
+
|
|
1390
|
+
|
|
1391
|
+
class ListPromptVersionsTimeoutTypedDict(TypedDict):
|
|
1392
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
1393
|
+
|
|
1394
|
+
call_timeout: float
|
|
1395
|
+
r"""Timeout value in milliseconds"""
|
|
1396
|
+
|
|
1397
|
+
|
|
1398
|
+
class ListPromptVersionsTimeout(BaseModel):
|
|
1399
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
1400
|
+
|
|
1401
|
+
call_timeout: float
|
|
1402
|
+
r"""Timeout value in milliseconds"""
|
|
1403
|
+
|
|
1404
|
+
|
+ListPromptVersionsMessagesPromptsResponse200Role = Literal["tool",]
+r"""The role of the messages author, in this case tool."""
+
+
+ListPromptVersionsContentPromptsResponse2002TypedDict = TextContentPartSchemaTypedDict
+
+
+ListPromptVersionsContentPromptsResponse2002 = TextContentPartSchema
+
+
+ListPromptVersionsMessagesPromptsResponse200ContentTypedDict = TypeAliasType(
+    "ListPromptVersionsMessagesPromptsResponse200ContentTypedDict",
+    Union[str, List[ListPromptVersionsContentPromptsResponse2002TypedDict]],
+)
+r"""The contents of the tool message."""
+
+
+ListPromptVersionsMessagesPromptsResponse200Content = TypeAliasType(
+    "ListPromptVersionsMessagesPromptsResponse200Content",
+    Union[str, List[ListPromptVersionsContentPromptsResponse2002]],
+)
+r"""The contents of the tool message."""
+
+
+ListPromptVersionsMessagesPromptsType = Literal["ephemeral",]
+r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+
+ListPromptVersionsMessagesTTL = Literal[
+    "5m",
+    "1h",
+]
+r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+- `5m`: 5 minutes
+- `1h`: 1 hour
+
+Defaults to `5m`. Only supported by `Anthropic` Claude models.
+"""
+
+
+class ListPromptVersionsMessagesCacheControlTypedDict(TypedDict):
+    type: ListPromptVersionsMessagesPromptsType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+    ttl: NotRequired[ListPromptVersionsMessagesTTL]
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+
+class ListPromptVersionsMessagesCacheControl(BaseModel):
+    type: ListPromptVersionsMessagesPromptsType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+    ttl: Optional[ListPromptVersionsMessagesTTL] = "5m"
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class ListPromptVersionsMessagesToolMessageTypedDict(TypedDict):
+    role: ListPromptVersionsMessagesPromptsResponse200Role
+    r"""The role of the messages author, in this case tool."""
+    content: ListPromptVersionsMessagesPromptsResponse200ContentTypedDict
+    r"""The contents of the tool message."""
+    tool_call_id: Nullable[str]
+    r"""Tool call that this message is responding to."""
+    cache_control: NotRequired[ListPromptVersionsMessagesCacheControlTypedDict]
+
+
+class ListPromptVersionsMessagesToolMessage(BaseModel):
+    role: ListPromptVersionsMessagesPromptsResponse200Role
+    r"""The role of the messages author, in this case tool."""
+
+    content: ListPromptVersionsMessagesPromptsResponse200Content
+    r"""The contents of the tool message."""
+
+    tool_call_id: Nullable[str]
+    r"""Tool call that this message is responding to."""
+
+    cache_control: Optional[ListPromptVersionsMessagesCacheControl] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["cache_control"])
+        nullable_fields = set(["tool_call_id"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
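The tool-message model above pairs a required, nullable tool_call_id with the shared serialize_model pattern: UNSET fields are dropped, optional fields left at None are dropped, and nullable fields that were explicitly set survive (even as null). A small sketch, assuming the class is importable from orq_ai_sdk.models and using made-up values:

    from orq_ai_sdk.models import ListPromptVersionsMessagesToolMessage

    msg = ListPromptVersionsMessagesToolMessage(
        role="tool",
        content="72F and sunny",
        tool_call_id="call_123",  # illustrative ID
    )
    # cache_control was never set, so the custom serializer omits it entirely.
    print(msg.model_dump())
    # expected: {"role": "tool", "content": "72F and sunny", "tool_call_id": "call_123"}
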
+ListPromptVersionsContentPromptsResponse2TypedDict = TypeAliasType(
+    "ListPromptVersionsContentPromptsResponse2TypedDict",
+    Union[
+        RefusalPartSchemaTypedDict,
+        RedactedReasoningPartSchemaTypedDict,
+        TextContentPartSchemaTypedDict,
+        ReasoningPartSchemaTypedDict,
+    ],
+)
+
+
+ListPromptVersionsContentPromptsResponse2 = Annotated[
+    Union[
+        Annotated[TextContentPartSchema, Tag("text")],
+        Annotated[RefusalPartSchema, Tag("refusal")],
+        Annotated[ReasoningPartSchema, Tag("reasoning")],
+        Annotated[RedactedReasoningPartSchema, Tag("redacted_reasoning")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+
+
+ListPromptVersionsMessagesPromptsResponseContentTypedDict = TypeAliasType(
+    "ListPromptVersionsMessagesPromptsResponseContentTypedDict",
+    Union[str, List[ListPromptVersionsContentPromptsResponse2TypedDict]],
+)
+r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+
+ListPromptVersionsMessagesPromptsResponseContent = TypeAliasType(
+    "ListPromptVersionsMessagesPromptsResponseContent",
+    Union[str, List[ListPromptVersionsContentPromptsResponse2]],
+)
+r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+
+ListPromptVersionsMessagesPromptsResponseRole = Literal["assistant",]
+r"""The role of the messages author, in this case `assistant`."""
+
+
+class ListPromptVersionsMessagesAudioTypedDict(TypedDict):
+    r"""Data about a previous audio response from the model."""
+
+    id: str
+    r"""Unique identifier for a previous audio response from the model."""
+
+
+class ListPromptVersionsMessagesAudio(BaseModel):
+    r"""Data about a previous audio response from the model."""
+
+    id: str
+    r"""Unique identifier for a previous audio response from the model."""
+
+
+ListPromptVersionsMessagesType = Literal["function",]
+r"""The type of the tool. Currently, only `function` is supported."""
+
+
+class ListPromptVersionsMessagesFunctionTypedDict(TypedDict):
+    name: NotRequired[str]
+    r"""The name of the function to call."""
+    arguments: NotRequired[str]
+    r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
+
+
+class ListPromptVersionsMessagesFunction(BaseModel):
+    name: Optional[str] = None
+    r"""The name of the function to call."""
+
+    arguments: Optional[str] = None
+    r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class ListPromptVersionsMessagesToolCallsTypedDict(TypedDict):
+    id: str
+    r"""The ID of the tool call."""
+    type: ListPromptVersionsMessagesType
+    r"""The type of the tool. Currently, only `function` is supported."""
+    function: ListPromptVersionsMessagesFunctionTypedDict
+    thought_signature: NotRequired[str]
+    r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
+
+
+class ListPromptVersionsMessagesToolCalls(BaseModel):
+    id: str
+    r"""The ID of the tool call."""
+
+    type: ListPromptVersionsMessagesType
+    r"""The type of the tool. Currently, only `function` is supported."""
+
+    function: ListPromptVersionsMessagesFunction
+
+    thought_signature: Optional[str] = None
+    r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["thought_signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class ListPromptVersionsMessagesAssistantMessageTypedDict(TypedDict):
+    role: ListPromptVersionsMessagesPromptsResponseRole
+    r"""The role of the messages author, in this case `assistant`."""
+    content: NotRequired[
+        Nullable[ListPromptVersionsMessagesPromptsResponseContentTypedDict]
+    ]
+    r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+    refusal: NotRequired[Nullable[str]]
+    r"""The refusal message by the assistant."""
+    name: NotRequired[str]
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+    audio: NotRequired[Nullable[ListPromptVersionsMessagesAudioTypedDict]]
+    r"""Data about a previous audio response from the model."""
+    tool_calls: NotRequired[List[ListPromptVersionsMessagesToolCallsTypedDict]]
+    r"""The tool calls generated by the model, such as function calls."""
+
+
+class ListPromptVersionsMessagesAssistantMessage(BaseModel):
+    role: ListPromptVersionsMessagesPromptsResponseRole
+    r"""The role of the messages author, in this case `assistant`."""
+
+    content: OptionalNullable[ListPromptVersionsMessagesPromptsResponseContent] = UNSET
+    r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+    refusal: OptionalNullable[str] = UNSET
+    r"""The refusal message by the assistant."""
+
+    name: Optional[str] = None
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+    audio: OptionalNullable[ListPromptVersionsMessagesAudio] = UNSET
+    r"""Data about a previous audio response from the model."""
+
+    tool_calls: Optional[List[ListPromptVersionsMessagesToolCalls]] = None
+    r"""The tool calls generated by the model, such as function calls."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
+        nullable_fields = set(["content", "refusal", "audio"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
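An assistant turn that carries a function call can be built from the classes above; content may stay unset because tool_calls is present. A sketch only, with an assumed import path and made-up call ID and arguments:

    from orq_ai_sdk.models import (
        ListPromptVersionsMessagesAssistantMessage,
        ListPromptVersionsMessagesFunction,
        ListPromptVersionsMessagesToolCalls,
    )

    assistant_turn = ListPromptVersionsMessagesAssistantMessage(
        role="assistant",
        tool_calls=[
            ListPromptVersionsMessagesToolCalls(
                id="call_abc123",  # illustrative
                type="function",
                function=ListPromptVersionsMessagesFunction(
                    name="get_weather",
                    arguments='{"city": "Amsterdam"}',
                ),
            )
        ],
    )
    # content/refusal/audio stay UNSET, so serialize_model drops them from the payload.
    print(assistant_turn.model_dump())
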
+ListPromptVersionsMessagesPromptsRole = Literal["user",]
+r"""The role of the messages author, in this case `user`."""
+
+
+ListPromptVersions2PromptsResponse200Type = Literal["file",]
+r"""The type of the content part. Always `file`."""
+
+
+ListPromptVersions2PromptsResponse200ApplicationJSONType = Literal["ephemeral",]
+r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+
+ListPromptVersions2TTL = Literal[
+    "5m",
+    "1h",
+]
+r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+- `5m`: 5 minutes
+- `1h`: 1 hour
+
+Defaults to `5m`. Only supported by `Anthropic` Claude models.
+"""
+
+
+class ListPromptVersions2CacheControlTypedDict(TypedDict):
+    type: ListPromptVersions2PromptsResponse200ApplicationJSONType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+    ttl: NotRequired[ListPromptVersions2TTL]
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+
+class ListPromptVersions2CacheControl(BaseModel):
+    type: ListPromptVersions2PromptsResponse200ApplicationJSONType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+    ttl: Optional[ListPromptVersions2TTL] = "5m"
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class ListPromptVersions24TypedDict(TypedDict):
+    type: ListPromptVersions2PromptsResponse200Type
+    r"""The type of the content part. Always `file`."""
+    file: FileContentPartSchemaTypedDict
+    r"""File data for the content part. Must contain either file_data or uri, but not both."""
+    cache_control: NotRequired[ListPromptVersions2CacheControlTypedDict]
+
+
+class ListPromptVersions24(BaseModel):
+    type: ListPromptVersions2PromptsResponse200Type
+    r"""The type of the content part. Always `file`."""
+
+    file: FileContentPartSchema
+    r"""File data for the content part. Must contain either file_data or uri, but not both."""
+
+    cache_control: Optional[ListPromptVersions2CacheControl] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["cache_control"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ListPromptVersionsContentPrompts2TypedDict = TypeAliasType(
+    "ListPromptVersionsContentPrompts2TypedDict",
+    Union[
+        AudioContentPartSchemaTypedDict,
+        TextContentPartSchemaTypedDict,
+        ImageContentPartSchemaTypedDict,
+        ListPromptVersions24TypedDict,
+    ],
+)
+
+
+ListPromptVersionsContentPrompts2 = Annotated[
+    Union[
+        Annotated[TextContentPartSchema, Tag("text")],
+        Annotated[ImageContentPartSchema, Tag("image_url")],
+        Annotated[AudioContentPartSchema, Tag("input_audio")],
+        Annotated[ListPromptVersions24, Tag("file")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+
+
+ListPromptVersionsMessagesPromptsContentTypedDict = TypeAliasType(
+    "ListPromptVersionsMessagesPromptsContentTypedDict",
+    Union[str, List[ListPromptVersionsContentPrompts2TypedDict]],
+)
+r"""The contents of the user message."""
+
+
+ListPromptVersionsMessagesPromptsContent = TypeAliasType(
+    "ListPromptVersionsMessagesPromptsContent",
+    Union[str, List[ListPromptVersionsContentPrompts2]],
+)
+r"""The contents of the user message."""
+
+
+class ListPromptVersionsMessagesUserMessageTypedDict(TypedDict):
+    role: ListPromptVersionsMessagesPromptsRole
+    r"""The role of the messages author, in this case `user`."""
+    content: ListPromptVersionsMessagesPromptsContentTypedDict
+    r"""The contents of the user message."""
+    name: NotRequired[str]
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+
+class ListPromptVersionsMessagesUserMessage(BaseModel):
+    role: ListPromptVersionsMessagesPromptsRole
+    r"""The role of the messages author, in this case `user`."""
+
+    content: ListPromptVersionsMessagesPromptsContent
+    r"""The contents of the user message."""
+
+    name: Optional[str] = None
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
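User-message content accepts either a plain string or a list of typed content parts (text, image_url, input_audio, file) discriminated on their type field. A minimal string-content sketch under the same import assumption; the part schemas themselves (TextContentPartSchema and friends) are defined in other modules of this package:

    from orq_ai_sdk.models import ListPromptVersionsMessagesUserMessage

    user_turn = ListPromptVersionsMessagesUserMessage(
        role="user",
        content="Summarize the attached report in three bullet points.",
    )
    # `name` is optional and left unset here, so the serializer leaves it out.
    print(user_turn.model_dump())
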
+ListPromptVersionsMessagesRole = Literal["system",]
+r"""The role of the messages author, in this case `system`."""
+
+
+ListPromptVersionsMessagesContentTypedDict = TypeAliasType(
+    "ListPromptVersionsMessagesContentTypedDict",
+    Union[str, List[TextContentPartSchemaTypedDict]],
+)
+r"""The contents of the system message."""
+
+
+ListPromptVersionsMessagesContent = TypeAliasType(
+    "ListPromptVersionsMessagesContent", Union[str, List[TextContentPartSchema]]
+)
+r"""The contents of the system message."""
+
+
+class ListPromptVersionsMessagesSystemMessageTypedDict(TypedDict):
+    r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
+
+    role: ListPromptVersionsMessagesRole
+    r"""The role of the messages author, in this case `system`."""
+    content: ListPromptVersionsMessagesContentTypedDict
+    r"""The contents of the system message."""
+    name: NotRequired[str]
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+
+class ListPromptVersionsMessagesSystemMessage(BaseModel):
+    r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
+
+    role: ListPromptVersionsMessagesRole
+    r"""The role of the messages author, in this case `system`."""
+
+    content: ListPromptVersionsMessagesContent
+    r"""The contents of the system message."""
+
+    name: Optional[str] = None
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ListPromptVersionsPromptsMessagesTypedDict = TypeAliasType(
+    "ListPromptVersionsPromptsMessagesTypedDict",
+    Union[
+        ListPromptVersionsMessagesSystemMessageTypedDict,
+        ListPromptVersionsMessagesUserMessageTypedDict,
+        ListPromptVersionsMessagesToolMessageTypedDict,
+        ListPromptVersionsMessagesAssistantMessageTypedDict,
+    ],
+)
+
+
+ListPromptVersionsPromptsMessages = Annotated[
+    Union[
+        Annotated[ListPromptVersionsMessagesSystemMessage, Tag("system")],
+        Annotated[ListPromptVersionsMessagesUserMessage, Tag("user")],
+        Annotated[ListPromptVersionsMessagesAssistantMessage, Tag("assistant")],
+        Annotated[ListPromptVersionsMessagesToolMessage, Tag("tool")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]
+
+
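ListPromptVersionsPromptsMessages routes each message to one of the four classes above using its role value as the Tag. Because the TypedDict alias mirrors the same union, plain dicts with the right role key also satisfy the type; a sketch of such a list (values are illustrative, and the alias import assumes the generated models package re-exports it):

    from typing import List

    from orq_ai_sdk.models import ListPromptVersionsPromptsMessagesTypedDict

    conversation: List[ListPromptVersionsPromptsMessagesTypedDict] = [
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "What is the capital of France?"},
        {"role": "assistant", "content": "Paris."},
    ]
    # The `role` key is the discriminator: "system", "user", "assistant" or "tool"
    # selects the matching message model when the SDK validates this list.
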
+class ListPromptVersionsPromptFieldTypedDict(TypedDict):
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+    audio: NotRequired[Nullable[ListPromptVersionsAudioTypedDict]]
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+    frequency_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+    max_tokens: NotRequired[Nullable[int]]
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+    max_completion_tokens: NotRequired[Nullable[int]]
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+    logprobs: NotRequired[Nullable[bool]]
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+    top_logprobs: NotRequired[Nullable[int]]
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+    n: NotRequired[Nullable[int]]
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+    presence_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+    response_format: NotRequired[ListPromptVersionsResponseFormatTypedDict]
+    r"""An object specifying the format that the model must output"""
+    reasoning_effort: NotRequired[ListPromptVersionsReasoningEffort]
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+    verbosity: NotRequired[str]
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+    seed: NotRequired[Nullable[float]]
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+    stop: NotRequired[Nullable[ListPromptVersionsStopTypedDict]]
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+    stream_options: NotRequired[Nullable[ListPromptVersionsStreamOptionsTypedDict]]
+    r"""Options for streaming response. Only set this when you set stream: true."""
+    thinking: NotRequired[ListPromptVersionsThinkingTypedDict]
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+    top_p: NotRequired[Nullable[float]]
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+    top_k: NotRequired[Nullable[float]]
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+    tool_choice: NotRequired[ListPromptVersionsToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model."""
+    parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use."""
+    modalities: NotRequired[Nullable[List[ListPromptVersionsModalities]]]
+    r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+    guardrails: NotRequired[List[ListPromptVersionsGuardrailsTypedDict]]
+    r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[ListPromptVersionsFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[ListPromptVersionsRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[ListPromptVersionsCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[ListPromptVersionsLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[ListPromptVersionsTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+    messages: NotRequired[List[ListPromptVersionsPromptsMessagesTypedDict]]
+    r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+    model: NotRequired[Nullable[str]]
+    r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
+    version: NotRequired[str]
+
+
+class ListPromptVersionsPromptField(BaseModel):
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
+    audio: OptionalNullable[ListPromptVersionsAudio] = UNSET
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+
+    frequency_penalty: OptionalNullable[float] = UNSET
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+
+    max_tokens: OptionalNullable[int] = UNSET
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+
+    max_completion_tokens: OptionalNullable[int] = UNSET
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+
+    logprobs: OptionalNullable[bool] = UNSET
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+
+    top_logprobs: OptionalNullable[int] = UNSET
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+
+    n: OptionalNullable[int] = UNSET
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+
+    presence_penalty: OptionalNullable[float] = UNSET
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+
+    response_format: Optional[ListPromptVersionsResponseFormat] = None
+    r"""An object specifying the format that the model must output"""
+
+    reasoning_effort: Optional[ListPromptVersionsReasoningEffort] = None
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+
+    verbosity: Optional[str] = None
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+
+    seed: OptionalNullable[float] = UNSET
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+
+    stop: OptionalNullable[ListPromptVersionsStop] = UNSET
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+
+    stream_options: OptionalNullable[ListPromptVersionsStreamOptions] = UNSET
+    r"""Options for streaming response. Only set this when you set stream: true."""
+
+    thinking: Optional[ListPromptVersionsThinking] = None
+
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+
+    top_p: OptionalNullable[float] = UNSET
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+
+    top_k: OptionalNullable[float] = UNSET
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+
+    tool_choice: Optional[ListPromptVersionsToolChoice] = None
+    r"""Controls which (if any) tool is called by the model."""
+
+    parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use."""
+
+    modalities: OptionalNullable[List[ListPromptVersionsModalities]] = UNSET
+    r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+
+    guardrails: Optional[List[ListPromptVersionsGuardrails]] = None
+    r"""A list of guardrails to apply to the request."""
+
+    fallbacks: Optional[List[ListPromptVersionsFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[ListPromptVersionsRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[ListPromptVersionsCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[ListPromptVersionsLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[ListPromptVersionsTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    messages: Optional[List[ListPromptVersionsPromptsMessages]] = None
+    r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+
+    model: OptionalNullable[str] = UNSET
+    r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
+
+    version: Optional[str] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+                "messages",
+                "model",
+                "version",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+                "model",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
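ListPromptVersionsPromptField is the prompt configuration attached to each version: every field is optional, UNSET-aware, and serialized through the same wrap serializer, so only what you actually set ends up in the payload. A small sketch under the same import assumption:

    from orq_ai_sdk.models import ListPromptVersionsPromptField

    prompt = ListPromptVersionsPromptField(
        model="openai/gpt-4o",
        temperature=0.2,
        max_completion_tokens=512,
    )
    # Unset knobs (top_p, seed, fallbacks, ...) are dropped by serialize_model.
    print(prompt.model_dump())
    # expected: {"model": "openai/gpt-4o", "temperature": 0.2, "max_completion_tokens": 512}
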
+ListPromptVersionsUseCases = Literal[
+    "Agents simulations",
+    "Agents",
+    "API interaction",
+    "Autonomous Agents",
+    "Chatbots",
+    "Classification",
+    "Code understanding",
+    "Code writing",
+    "Conversation",
+    "Documents QA",
+    "Evaluation",
+    "Extraction",
+    "Multi-modal",
+    "Self-checking",
+    "Sentiment analysis",
+    "SQL",
+    "Summarization",
+    "Tagging",
+    "Translation (document)",
+    "Translation (sentences)",
+]
+
+
+ListPromptVersionsLanguage = Literal[
+    "Chinese",
+    "Dutch",
+    "English",
+    "French",
+    "German",
+    "Russian",
+    "Spanish",
+]
+r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+
+class ListPromptVersionsMetadataTypedDict(TypedDict):
+    use_cases: NotRequired[List[ListPromptVersionsUseCases]]
+    r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
+    language: NotRequired[Nullable[ListPromptVersionsLanguage]]
+    r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+
+class ListPromptVersionsMetadata(BaseModel):
+    use_cases: Optional[List[ListPromptVersionsUseCases]] = None
+    r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
+
+    language: OptionalNullable[ListPromptVersionsLanguage] = UNSET
+    r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["use_cases", "language"])
+        nullable_fields = set(["language"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
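The metadata model just tags a prompt with use cases and a language, both drawn from the closed Literal sets above. A one-line sketch, again assuming the import path:

    from orq_ai_sdk.models import ListPromptVersionsMetadata

    meta = ListPromptVersionsMetadata(use_cases=["Chatbots", "Summarization"], language="English")
    print(meta.model_dump())  # expected: {"use_cases": ["Chatbots", "Summarization"], "language": "English"}
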
+class ListPromptVersionsDataTypedDict(TypedDict):
+    id: str
+    prompt: ListPromptVersionsPromptFieldTypedDict
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+    timestamp: str
+    created_by_id: NotRequired[Nullable[str]]
+    updated_by_id: NotRequired[Nullable[str]]
+    description: NotRequired[Nullable[str]]
+    r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
+    prompt_config: NotRequired[ListPromptVersionsPromptConfigTypedDict]
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
+    metadata: NotRequired[ListPromptVersionsMetadataTypedDict]
+
+
+class ListPromptVersionsData(BaseModel):
+    id: Annotated[str, pydantic.Field(alias="_id")]
+
+    prompt: ListPromptVersionsPromptField
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+    timestamp: str
+
+    created_by_id: OptionalNullable[str] = UNSET
+
+    updated_by_id: OptionalNullable[str] = UNSET
+
+    description: OptionalNullable[str] = UNSET
+    r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
+
+    prompt_config: Annotated[
+        Optional[ListPromptVersionsPromptConfig],
+        pydantic.Field(
+            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+        ),
+    ] = None
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
+
+    metadata: Optional[ListPromptVersionsMetadata] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "created_by_id",
+                "updated_by_id",
+                "description",
+                "prompt_config",
+                "metadata",
+            ]
+        )
+        nullable_fields = set(["created_by_id", "updated_by_id", "description"])
+        serialized = handler(self)
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

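Putting the pieces together, a single prompt-version entry validates against ListPromptVersionsData; note the `_id` alias on `id` and the deprecated prompt_config field alongside the newer prompt field. A hedged sketch with fabricated identifier and timestamp, assuming the usual import path:

    from orq_ai_sdk.models import ListPromptVersionsData

    version = ListPromptVersionsData.model_validate(
        {
            "_id": "01HZX0EXAMPLE",  # illustrative identifier
            "timestamp": "2025-01-01T00:00:00Z",
            "prompt": {
                "model": "openai/gpt-4o",
                "messages": [{"role": "system", "content": "You are a terse assistant."}],
            },
        }
    )
    print(version.id, version.prompt.model)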