orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +332 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +258 -0
- orq_ai_sdk/embeddings.py +238 -0
- orq_ai_sdk/generations.py +272 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5341 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1922 -384
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1375 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +1890 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +384 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +504 -0
- orq_ai_sdk/models/createimageop.py +208 -117
- orq_ai_sdk/models/createimagevariationop.py +486 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2748 -1252
- orq_ai_sdk/models/creatererankop.py +416 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +316 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +562 -0
- orq_ai_sdk/models/createtranslationop.py +540 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1690 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1673 -230
- orq_ai_sdk/models/getpromptversionop.py +1670 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +793 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1684 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1451 -197
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1428 -195
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1951 -404
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2844 -1450
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +660 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +232 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +251 -0
- orq_ai_sdk/transcriptions.py +326 -0
- orq_ai_sdk/translations.py +298 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +254 -0
- orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
- orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
@@ -1,6 +1,30 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .audiocontentpartschema import (
+    AudioContentPartSchema,
+    AudioContentPartSchemaTypedDict,
+)
+from .filecontentpartschema import FileContentPartSchema, FileContentPartSchemaTypedDict
+from .imagecontentpartschema import (
+    ImageContentPartSchema,
+    ImageContentPartSchemaTypedDict,
+)
+from .reasoningpartschema import ReasoningPartSchema, ReasoningPartSchemaTypedDict
+from .redactedreasoningpartschema import (
+    RedactedReasoningPartSchema,
+    RedactedReasoningPartSchemaTypedDict,
+)
+from .refusalpartschema import RefusalPartSchema, RefusalPartSchemaTypedDict
+from .textcontentpartschema import TextContentPartSchema, TextContentPartSchemaTypedDict
+from .thinkingconfigdisabledschema import (
+    ThinkingConfigDisabledSchema,
+    ThinkingConfigDisabledSchemaTypedDict,
+)
+from .thinkingconfigenabledschema import (
+    ThinkingConfigEnabledSchema,
+    ThinkingConfigEnabledSchemaTypedDict,
+)
 from dataclasses import dataclass, field
 import httpx
 from orq_ai_sdk.models import OrqError
@@ -15,7 +39,13 @@ from orq_ai_sdk.utils import FieldMetadata, PathParamMetadata, get_discriminator
 import pydantic
 from pydantic import Discriminator, Tag, model_serializer
 from typing import Any, Dict, List, Literal, Optional, Union
-from typing_extensions import
+from typing_extensions import (
+    Annotated,
+    NotRequired,
+    TypeAliasType,
+    TypedDict,
+    deprecated,
+)


 class GetPromptVersionRequestTypedDict(TypedDict):
@@ -67,6 +97,7 @@ GetPromptVersionModelType = Literal[
     "tts",
     "stt",
     "rerank",
+    "ocr",
     "moderation",
     "vision",
 ]
@@ -107,39 +138,47 @@ GetPromptVersionResponseFormat4 = Literal[
 ]


-
+GetPromptVersionResponseFormatPromptsResponse200ApplicationJSONResponseBodyType = (
+    Literal["text",]
+)


 class GetPromptVersionResponseFormat3TypedDict(TypedDict):
-    type:
+    type: (
+        GetPromptVersionResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
+    )


 class GetPromptVersionResponseFormat3(BaseModel):
-    type:
+    type: (
+        GetPromptVersionResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
+    )


-
+GetPromptVersionResponseFormatPromptsResponse200ApplicationJSONType = Literal[
+    "json_object",
+]


 class GetPromptVersionResponseFormat2TypedDict(TypedDict):
-    type:
+    type: GetPromptVersionResponseFormatPromptsResponse200ApplicationJSONType


 class GetPromptVersionResponseFormat2(BaseModel):
-    type:
+    type: GetPromptVersionResponseFormatPromptsResponse200ApplicationJSONType


-
+GetPromptVersionResponseFormatPromptsResponse200Type = Literal["json_schema",]


-class
+class GetPromptVersionResponseFormatPromptsResponseJSONSchemaTypedDict(TypedDict):
     name: str
     schema_: Dict[str, Any]
     description: NotRequired[str]
     strict: NotRequired[bool]


-class
+class GetPromptVersionResponseFormatPromptsResponseJSONSchema(BaseModel):
     name: str

     schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
@@ -148,23 +187,55 @@ class GetPromptVersionResponseFormatJSONSchema(BaseModel):

     strict: Optional[bool] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class GetPromptVersionResponseFormat1TypedDict(TypedDict):
-    type:
-    json_schema:
+    type: GetPromptVersionResponseFormatPromptsResponse200Type
+    json_schema: GetPromptVersionResponseFormatPromptsResponseJSONSchemaTypedDict
     display_name: NotRequired[str]


 class GetPromptVersionResponseFormat1(BaseModel):
-    type:
+    type: GetPromptVersionResponseFormatPromptsResponse200Type

-    json_schema:
+    json_schema: GetPromptVersionResponseFormatPromptsResponseJSONSchema

     display_name: Optional[str] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name"])
+        serialized = handler(self)
+        m = {}

-
-
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+GetPromptVersionPromptsResponseFormatTypedDict = TypeAliasType(
+    "GetPromptVersionPromptsResponseFormatTypedDict",
     Union[
         GetPromptVersionResponseFormat2TypedDict,
         GetPromptVersionResponseFormat3TypedDict,
@@ -184,8 +255,8 @@ Important: when using JSON mode, you must also instruct the model to produce JSO
 """


-
-    "
+GetPromptVersionPromptsResponseFormat = TypeAliasType(
+    "GetPromptVersionPromptsResponseFormat",
     Union[
         GetPromptVersionResponseFormat2,
         GetPromptVersionResponseFormat3,
@@ -219,7 +290,7 @@ GetPromptVersionEncodingFormat = Literal[
 r"""The format to return the embeddings"""


-
+GetPromptVersionPromptsReasoningEffort = Literal[
     "none",
     "disable",
     "minimal",
@@ -272,7 +343,9 @@ class GetPromptVersionModelParametersTypedDict(TypedDict):
     r"""Only supported on `image` models."""
     style: NotRequired[str]
     r"""Only supported on `image` models."""
-    response_format: NotRequired[
+    response_format: NotRequired[
+        Nullable[GetPromptVersionPromptsResponseFormatTypedDict]
+    ]
     r"""An object specifying the format that the model must output.

 Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema
@@ -285,7 +358,7 @@ class GetPromptVersionModelParametersTypedDict(TypedDict):
     r"""The version of photoReal to use. Must be v1 or v2. Only available for `leonardoai` provider"""
     encoding_format: NotRequired[GetPromptVersionEncodingFormat]
     r"""The format to return the embeddings"""
-    reasoning_effort: NotRequired[
+    reasoning_effort: NotRequired[GetPromptVersionPromptsReasoningEffort]
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
     budget_tokens: NotRequired[float]
     r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""
@@ -341,7 +414,7 @@ class GetPromptVersionModelParameters(BaseModel):
     r"""Only supported on `image` models."""

     response_format: Annotated[
-        OptionalNullable[
+        OptionalNullable[GetPromptVersionPromptsResponseFormat],
         pydantic.Field(alias="responseFormat"),
     ] = UNSET
     r"""An object specifying the format that the model must output.
@@ -363,7 +436,7 @@ class GetPromptVersionModelParameters(BaseModel):
     r"""The format to return the embeddings"""

     reasoning_effort: Annotated[
-        Optional[
+        Optional[GetPromptVersionPromptsReasoningEffort],
         pydantic.Field(alias="reasoningEffort"),
     ] = None
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
@@ -383,51 +456,48 @@ class GetPromptVersionModelParameters(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        optional_fields = set(
+            [
+                "temperature",
+                "maxTokens",
+                "topK",
+                "topP",
+                "frequencyPenalty",
+                "presencePenalty",
+                "numImages",
+                "seed",
+                "format",
+                "dimensions",
+                "quality",
+                "style",
+                "responseFormat",
+                "photoRealVersion",
+                "encoding_format",
+                "reasoningEffort",
+                "budgetTokens",
+                "verbosity",
+                "thinkingLevel",
+            ]
+        )
+        nullable_fields = set(["responseFormat"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -503,6 +573,22 @@ class GetPromptVersion2File(BaseModel):
     filename: Optional[str] = None
     r"""The name of the file, used when passing the file to the model as a string."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["file_data", "uri", "mimeType", "filename"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class GetPromptVersion23TypedDict(TypedDict):
     type: GetPromptVersion2PromptsResponseType
@@ -539,6 +625,22 @@ class GetPromptVersion2ImageURL(BaseModel):
     detail: Optional[str] = None
     r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "detail"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class GetPromptVersion22TypedDict(TypedDict):
     r"""The image part of the prompt message. Only supported with vision models."""
@@ -606,7 +708,7 @@ GetPromptVersionContent = TypeAliasType(
 r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""


-
+GetPromptVersionPromptsType = Literal["function",]


 class GetPromptVersionFunctionTypedDict(TypedDict):
@@ -623,14 +725,14 @@ class GetPromptVersionFunction(BaseModel):


 class GetPromptVersionToolCallsTypedDict(TypedDict):
-    type:
+    type: GetPromptVersionPromptsType
     function: GetPromptVersionFunctionTypedDict
     id: NotRequired[str]
     index: NotRequired[float]


 class GetPromptVersionToolCalls(BaseModel):
-    type:
+    type: GetPromptVersionPromptsType

     function: GetPromptVersionFunction

@@ -638,6 +740,22 @@ class GetPromptVersionToolCalls(BaseModel):

     index: Optional[float] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "index"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class GetPromptVersionMessagesTypedDict(TypedDict):
     role: GetPromptVersionRole
@@ -661,61 +779,62 @@ class GetPromptVersionMessages(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["tool_calls", "tool_call_id"]
-        nullable_fields = ["content", "tool_call_id"]
-        null_default_fields = []
-
+        optional_fields = set(["tool_calls", "tool_call_id"])
+        nullable_fields = set(["content", "tool_call_id"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m


+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class GetPromptVersionPromptConfigTypedDict(TypedDict):
-    r"""A list of messages compatible with the openAI schema"""
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""

     messages: List[GetPromptVersionMessagesTypedDict]
     stream: NotRequired[bool]
-    model: NotRequired[str]
+    model: NotRequired[Nullable[str]]
     model_db_id: NotRequired[Nullable[str]]
     r"""The id of the resource"""
     model_type: NotRequired[Nullable[GetPromptVersionModelType]]
     r"""The modality of the model"""
     model_parameters: NotRequired[GetPromptVersionModelParametersTypedDict]
     r"""Model Parameters: Not all parameters apply to every model"""
-    provider: NotRequired[GetPromptVersionProvider]
+    provider: NotRequired[Nullable[GetPromptVersionProvider]]
     integration_id: NotRequired[Nullable[str]]
     r"""The ID of the integration to use"""
     version: NotRequired[str]


+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class GetPromptVersionPromptConfig(BaseModel):
-    r"""A list of messages compatible with the openAI schema"""
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""

     messages: List[GetPromptVersionMessages]

     stream: Optional[bool] = None

-    model:
+    model: OptionalNullable[str] = UNSET

     model_db_id: OptionalNullable[str] = UNSET
     r"""The id of the resource"""
@@ -726,7 +845,7 @@ class GetPromptVersionPromptConfig(BaseModel):
     model_parameters: Optional[GetPromptVersionModelParameters] = None
     r"""Model Parameters: Not all parameters apply to every model"""

-    provider:
+    provider: OptionalNullable[GetPromptVersionProvider] = UNSET

     integration_id: OptionalNullable[str] = UNSET
     r"""The ID of the integration to use"""
@@ -735,184 +854,1519 @@ class GetPromptVersionPromptConfig(BaseModel):
|
|
|
735
854
|
|
|
736
855
|
@model_serializer(mode="wrap")
|
|
737
856
|
def serialize_model(self, handler):
|
|
738
|
-
optional_fields =
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
857
|
+
optional_fields = set(
|
|
858
|
+
[
|
|
859
|
+
"stream",
|
|
860
|
+
"model",
|
|
861
|
+
"model_db_id",
|
|
862
|
+
"model_type",
|
|
863
|
+
"model_parameters",
|
|
864
|
+
"provider",
|
|
865
|
+
"integration_id",
|
|
866
|
+
"version",
|
|
867
|
+
]
|
|
868
|
+
)
|
|
869
|
+
nullable_fields = set(
|
|
870
|
+
["model", "model_db_id", "model_type", "provider", "integration_id"]
|
|
871
|
+
)
|
|
751
872
|
serialized = handler(self)
|
|
752
|
-
|
|
753
873
|
m = {}
|
|
754
874
|
|
|
755
875
|
for n, f in type(self).model_fields.items():
|
|
756
876
|
k = f.alias or n
|
|
757
877
|
val = serialized.get(k)
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
):
|
|
771
|
-
m[k] = val
|
|
878
|
+
is_nullable_and_explicitly_set = (
|
|
879
|
+
k in nullable_fields
|
|
880
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
881
|
+
)
|
|
882
|
+
|
|
883
|
+
if val != UNSET_SENTINEL:
|
|
884
|
+
if (
|
|
885
|
+
val is not None
|
|
886
|
+
or k not in optional_fields
|
|
887
|
+
or is_nullable_and_explicitly_set
|
|
888
|
+
):
|
|
889
|
+
m[k] = val
|
|
772
890
|
|
|
773
891
|
return m
|
|
774
892
|
|
|
775
893
|
|
|
776
|
-
|
|
777
|
-
"
|
|
778
|
-
"
|
|
779
|
-
"
|
|
780
|
-
"
|
|
781
|
-
"
|
|
782
|
-
"
|
|
783
|
-
"Code understanding",
|
|
784
|
-
"Code writing",
|
|
785
|
-
"Conversation",
|
|
786
|
-
"Documents QA",
|
|
787
|
-
"Evaluation",
|
|
788
|
-
"Extraction",
|
|
789
|
-
"Multi-modal",
|
|
790
|
-
"Self-checking",
|
|
791
|
-
"Sentiment analysis",
|
|
792
|
-
"SQL",
|
|
793
|
-
"Summarization",
|
|
794
|
-
"Tagging",
|
|
795
|
-
"Translation (document)",
|
|
796
|
-
"Translation (sentences)",
|
|
894
|
+
GetPromptVersionVoice = Literal[
|
|
895
|
+
"alloy",
|
|
896
|
+
"echo",
|
|
897
|
+
"fable",
|
|
898
|
+
"onyx",
|
|
899
|
+
"nova",
|
|
900
|
+
"shimmer",
|
|
797
901
|
]
|
|
902
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
798
903
|
|
|
799
904
|
|
|
800
|
-
|
|
801
|
-
"
|
|
802
|
-
"
|
|
803
|
-
"
|
|
804
|
-
"
|
|
805
|
-
"
|
|
806
|
-
"Russian",
|
|
807
|
-
"Spanish",
|
|
905
|
+
GetPromptVersionPromptsFormat = Literal[
|
|
906
|
+
"wav",
|
|
907
|
+
"mp3",
|
|
908
|
+
"flac",
|
|
909
|
+
"opus",
|
|
910
|
+
"pcm16",
|
|
808
911
|
]
|
|
809
|
-
r"""
|
|
912
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
810
913
|
|
|
811
914
|
|
|
812
|
-
class
|
|
813
|
-
|
|
814
|
-
r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
|
|
815
|
-
language: NotRequired[Nullable[GetPromptVersionLanguage]]
|
|
816
|
-
r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
|
|
915
|
+
class GetPromptVersionAudioTypedDict(TypedDict):
|
|
916
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
817
917
|
|
|
918
|
+
voice: GetPromptVersionVoice
|
|
919
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
920
|
+
format_: GetPromptVersionPromptsFormat
|
|
921
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
818
922
|
|
|
819
|
-
class GetPromptVersionMetadata(BaseModel):
|
|
820
|
-
use_cases: Optional[List[GetPromptVersionUseCases]] = None
|
|
821
|
-
r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
|
|
822
923
|
|
|
823
|
-
|
|
824
|
-
r"""
|
|
924
|
+
class GetPromptVersionAudio(BaseModel):
|
|
925
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
926
|
+
|
|
927
|
+
voice: GetPromptVersionVoice
|
|
928
|
+
r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
|
|
929
|
+
|
|
930
|
+
format_: Annotated[GetPromptVersionPromptsFormat, pydantic.Field(alias="format")]
|
|
931
|
+
r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
|
|
932
|
+
|
|
933
|
+
|
|
934
|
+
GetPromptVersionResponseFormatPromptsResponseType = Literal["json_schema",]
|
|
935
|
+
|
|
936
|
+
|
|
937
|
+
class GetPromptVersionResponseFormatJSONSchemaTypedDict(TypedDict):
|
|
938
|
+
name: str
|
|
939
|
+
r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
|
|
940
|
+
description: NotRequired[str]
|
|
941
|
+
r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
|
|
942
|
+
schema_: NotRequired[Any]
|
|
943
|
+
r"""The schema for the response format, described as a JSON Schema object."""
|
|
944
|
+
strict: NotRequired[bool]
|
|
945
|
+
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
946
|
+
|
|
947
|
+
|
|
948
|
+
class GetPromptVersionResponseFormatJSONSchema(BaseModel):
|
|
949
|
+
name: str
|
|
950
|
+
r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
|
|
951
|
+
|
|
952
|
+
description: Optional[str] = None
|
|
953
|
+
r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
|
|
954
|
+
|
|
955
|
+
schema_: Annotated[Optional[Any], pydantic.Field(alias="schema")] = None
|
|
956
|
+
r"""The schema for the response format, described as a JSON Schema object."""
|
|
957
|
+
|
|
958
|
+
strict: Optional[bool] = False
|
|
959
|
+
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
825
960
|
|
|
826
961
|
@model_serializer(mode="wrap")
|
|
827
962
|
def serialize_model(self, handler):
|
|
828
|
-
optional_fields = ["
|
|
829
|
-
nullable_fields = ["language"]
|
|
830
|
-
null_default_fields = []
|
|
831
|
-
|
|
963
|
+
optional_fields = set(["description", "schema", "strict"])
|
|
832
964
|
serialized = handler(self)
|
|
833
|
-
|
|
834
965
|
m = {}
|
|
835
966
|
|
|
836
967
|
for n, f in type(self).model_fields.items():
|
|
837
968
|
k = f.alias or n
|
|
838
969
|
val = serialized.get(k)
|
|
839
|
-
serialized.pop(k, None)
|
|
840
|
-
|
|
841
|
-
optional_nullable = k in optional_fields and k in nullable_fields
|
|
842
|
-
is_set = (
|
|
843
|
-
self.__pydantic_fields_set__.intersection({n})
|
|
844
|
-
or k in null_default_fields
|
|
845
|
-
) # pylint: disable=no-member
|
|
846
970
|
|
|
847
|
-
if val
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
not k in optional_fields or (optional_nullable and is_set)
|
|
851
|
-
):
|
|
852
|
-
m[k] = val
|
|
971
|
+
if val != UNSET_SENTINEL:
|
|
972
|
+
if val is not None or k not in optional_fields:
|
|
973
|
+
m[k] = val
|
|
853
974
|
|
|
854
975
|
return m
|
|
855
976
|
|
|
856
977
|
|
|
857
|
-
class
|
|
858
|
-
r"""
|
|
978
|
+
class GetPromptVersionResponseFormatPromptsJSONSchemaTypedDict(TypedDict):
|
|
979
|
+
r"""
|
|
859
980
|
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
r"""A list of messages compatible with the openAI schema"""
|
|
863
|
-
timestamp: str
|
|
864
|
-
created_by_id: NotRequired[Nullable[str]]
|
|
865
|
-
updated_by_id: NotRequired[Nullable[str]]
|
|
866
|
-
description: NotRequired[Nullable[str]]
|
|
867
|
-
r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
|
|
868
|
-
metadata: NotRequired[GetPromptVersionMetadataTypedDict]
|
|
981
|
+
JSON Schema response format. Used to generate structured JSON responses
|
|
982
|
+
"""
|
|
869
983
|
|
|
984
|
+
type: GetPromptVersionResponseFormatPromptsResponseType
|
|
985
|
+
json_schema: GetPromptVersionResponseFormatJSONSchemaTypedDict
|
|
870
986
|
|
|
871
|
-
class GetPromptVersionResponseBody(BaseModel):
|
|
872
|
-
r"""Prompt version retrieved successfully."""
|
|
873
987
|
|
|
874
|
-
|
|
988
|
+
class GetPromptVersionResponseFormatPromptsJSONSchema(BaseModel):
|
|
989
|
+
r"""
|
|
875
990
|
|
|
876
|
-
|
|
877
|
-
|
|
991
|
+
JSON Schema response format. Used to generate structured JSON responses
|
|
992
|
+
"""
|
|
878
993
|
|
|
879
|
-
|
|
994
|
+
type: GetPromptVersionResponseFormatPromptsResponseType
|
|
880
995
|
|
|
881
|
-
|
|
996
|
+
json_schema: GetPromptVersionResponseFormatJSONSchema
|
|
882
997
|
|
|
883
|
-
updated_by_id: OptionalNullable[str] = UNSET
|
|
884
998
|
|
|
885
|
-
|
|
886
|
-
r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
|
|
999
|
+
GetPromptVersionResponseFormatPromptsType = Literal["json_object",]
|
|
887
1000
|
|
|
888
|
-
|
|
1001
|
+
|
|
1002
|
+
class GetPromptVersionResponseFormatJSONObjectTypedDict(TypedDict):
|
|
1003
|
+
r"""
|
|
1004
|
+
|
|
1005
|
+
JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
|
|
1006
|
+
"""
|
|
1007
|
+
|
|
1008
|
+
type: GetPromptVersionResponseFormatPromptsType
|
|
1009
|
+
|
|
1010
|
+
|
|
1011
|
+
class GetPromptVersionResponseFormatJSONObject(BaseModel):
|
|
1012
|
+
r"""
|
|
1013
|
+
|
|
1014
|
+
JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
|
|
1015
|
+
"""
|
|
1016
|
+
|
|
1017
|
+
type: GetPromptVersionResponseFormatPromptsType
|
|
1018
|
+
|
|
1019
|
+
|
|
1020
|
+
GetPromptVersionResponseFormatType = Literal["text",]
|
|
1021
|
+
|
|
1022
|
+
|
|
1023
|
+
class GetPromptVersionResponseFormatTextTypedDict(TypedDict):
|
|
1024
|
+
r"""
|
|
1025
|
+
|
|
1026
|
+
Default response format. Used to generate text responses
|
|
1027
|
+
"""
|
|
1028
|
+
|
|
1029
|
+
type: GetPromptVersionResponseFormatType
|
|
1030
|
+
|
|
1031
|
+
|
|
1032
|
+
class GetPromptVersionResponseFormatText(BaseModel):
|
|
1033
|
+
r"""
|
|
1034
|
+
|
|
1035
|
+
Default response format. Used to generate text responses
|
|
1036
|
+
"""
|
|
1037
|
+
|
|
1038
|
+
type: GetPromptVersionResponseFormatType
|
|
1039
|
+
|
|
1040
|
+
|
|
1041
|
+
GetPromptVersionResponseFormatTypedDict = TypeAliasType(
|
|
1042
|
+
"GetPromptVersionResponseFormatTypedDict",
|
|
1043
|
+
Union[
|
|
1044
|
+
GetPromptVersionResponseFormatTextTypedDict,
|
|
1045
|
+
GetPromptVersionResponseFormatJSONObjectTypedDict,
|
|
1046
|
+
GetPromptVersionResponseFormatPromptsJSONSchemaTypedDict,
|
|
1047
|
+
],
|
|
1048
|
+
)
|
|
1049
|
+
r"""An object specifying the format that the model must output"""
|
|
1050
|
+
|
|
1051
|
+
|
|
1052
|
+
GetPromptVersionResponseFormat = Annotated[
|
|
1053
|
+
Union[
|
|
1054
|
+
Annotated[GetPromptVersionResponseFormatText, Tag("text")],
|
|
1055
|
+
Annotated[GetPromptVersionResponseFormatJSONObject, Tag("json_object")],
|
|
1056
|
+
Annotated[GetPromptVersionResponseFormatPromptsJSONSchema, Tag("json_schema")],
|
|
1057
|
+
],
|
|
1058
|
+
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
1059
|
+
]
|
|
1060
|
+
r"""An object specifying the format that the model must output"""
|
|
1061
|
+
|
|
1062
|
+
|
|
1063
|
+
GetPromptVersionReasoningEffort = Literal[
|
|
1064
|
+
"none",
|
|
1065
|
+
"minimal",
|
|
1066
|
+
"low",
|
|
1067
|
+
"medium",
|
|
1068
|
+
"high",
|
|
1069
|
+
"xhigh",
|
|
1070
|
+
]
|
|
1071
|
+
r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
|
|
1072
|
+
|
|
1073
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
|
|
1074
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
|
|
1075
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1076
|
+
- `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1077
|
+
|
|
1078
|
+
Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
|
|
1079
|
+
"""
|
|
1080
|
+
|
|
1081
|
+
|
|
1082
|
+
GetPromptVersionStopTypedDict = TypeAliasType(
|
|
1083
|
+
"GetPromptVersionStopTypedDict", Union[str, List[str]]
|
|
1084
|
+
)
|
|
1085
|
+
r"""Up to 4 sequences where the API will stop generating further tokens."""
|
|
1086
|
+
|
|
1087
|
+
|
|
1088
|
+
GetPromptVersionStop = TypeAliasType("GetPromptVersionStop", Union[str, List[str]])
|
|
1089
|
+
r"""Up to 4 sequences where the API will stop generating further tokens."""
|
|
1090
|
+
|
|
1091
|
+
|
|
1092
|
+
class GetPromptVersionStreamOptionsTypedDict(TypedDict):
|
|
1093
|
+
r"""Options for streaming response. Only set this when you set stream: true."""
|
|
1094
|
+
|
|
1095
|
+
include_usage: NotRequired[bool]
|
|
1096
|
+
r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
|
|
1097
|
+
|
|
1098
|
+
|
|
1099
|
+
class GetPromptVersionStreamOptions(BaseModel):
|
|
1100
|
+
r"""Options for streaming response. Only set this when you set stream: true."""
|
|
1101
|
+
|
|
1102
|
+
include_usage: Optional[bool] = None
|
|
1103
|
+
r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
|
|
889
1104
|
|
|
890
1105
|
@model_serializer(mode="wrap")
|
|
891
1106
|
def serialize_model(self, handler):
|
|
892
|
-
optional_fields = ["
|
|
893
|
-
nullable_fields = ["created_by_id", "updated_by_id", "description"]
|
|
894
|
-
null_default_fields = []
|
|
895
|
-
|
|
1107
|
+
optional_fields = set(["include_usage"])
|
|
896
1108
|
serialized = handler(self)
|
|
1109
|
+
m = {}
|
|
897
1110
|
|
|
1111
|
+
for n, f in type(self).model_fields.items():
|
|
1112
|
+
k = f.alias or n
|
|
1113
|
+
val = serialized.get(k)
|
|
1114
|
+
|
|
1115
|
+
if val != UNSET_SENTINEL:
|
|
1116
|
+
if val is not None or k not in optional_fields:
|
|
1117
|
+
m[k] = val
|
|
1118
|
+
|
|
1119
|
+
return m
|
|
1120
|
+
|
|
1121
|
+
|
|
1122
|
+
GetPromptVersionThinkingTypedDict = TypeAliasType(
|
|
1123
|
+
"GetPromptVersionThinkingTypedDict",
|
|
1124
|
+
Union[ThinkingConfigDisabledSchemaTypedDict, ThinkingConfigEnabledSchemaTypedDict],
|
|
1125
|
+
)
|
|
1126
|
+
|
|
1127
|
+
|
|
1128
|
+
GetPromptVersionThinking = Annotated[
|
|
1129
|
+
Union[
|
|
1130
|
+
Annotated[ThinkingConfigDisabledSchema, Tag("disabled")],
|
|
1131
|
+
Annotated[ThinkingConfigEnabledSchema, Tag("enabled")],
|
|
1132
|
+
],
|
|
1133
|
+
Discriminator(lambda m: get_discriminator(m, "type", "type")),
|
|
1134
|
+
]
|
|
1135
|
+
|
|
1136
|
+
|
|
1137
|
+
GetPromptVersionToolChoiceType = Literal["function",]
|
|
1138
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
1139
|
+
|
|
1140
|
+
|
|
1141
|
+
class GetPromptVersionToolChoiceFunctionTypedDict(TypedDict):
|
|
1142
|
+
name: str
|
|
1143
|
+
r"""The name of the function to call."""
|
|
1144
|
+
|
|
1145
|
+
|
|
1146
|
+
class GetPromptVersionToolChoiceFunction(BaseModel):
|
|
1147
|
+
name: str
|
|
1148
|
+
r"""The name of the function to call."""
|
|
1149
|
+
|
|
1150
|
+
|
|
1151
|
+
class GetPromptVersionToolChoice2TypedDict(TypedDict):
|
|
1152
|
+
function: GetPromptVersionToolChoiceFunctionTypedDict
|
|
1153
|
+
type: NotRequired[GetPromptVersionToolChoiceType]
|
|
1154
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
1155
|
+
|
|
1156
|
+
|
|
1157
|
+
class GetPromptVersionToolChoice2(BaseModel):
|
|
1158
|
+
function: GetPromptVersionToolChoiceFunction
|
|
1159
|
+
|
|
1160
|
+
type: Optional[GetPromptVersionToolChoiceType] = None
|
|
1161
|
+
r"""The type of the tool. Currently, only function is supported."""
|
|
1162
|
+
|
|
1163
|
+
@model_serializer(mode="wrap")
|
|
1164
|
+
def serialize_model(self, handler):
|
|
1165
|
+
optional_fields = set(["type"])
|
|
1166
|
+
serialized = handler(self)
|
|
1167
|
+
m = {}
|
|
1168
|
+
|
|
1169
|
+
for n, f in type(self).model_fields.items():
|
|
1170
|
+
k = f.alias or n
|
|
1171
|
+
val = serialized.get(k)
|
|
1172
|
+
|
|
1173
|
+
if val != UNSET_SENTINEL:
|
|
1174
|
+
if val is not None or k not in optional_fields:
|
|
1175
|
+
m[k] = val
|
|
1176
|
+
|
|
1177
|
+
return m
|
|
1178
|
+
|
|
1179
|
+
|
|
1180
|
+
GetPromptVersionToolChoice1 = Literal[
|
|
1181
|
+
"none",
|
|
1182
|
+
"auto",
|
|
1183
|
+
"required",
|
|
1184
|
+
]
|
|
1185
|
+
|
|
1186
|
+
|
|
1187
|
+
GetPromptVersionToolChoiceTypedDict = TypeAliasType(
|
|
1188
|
+
"GetPromptVersionToolChoiceTypedDict",
|
|
1189
|
+
Union[GetPromptVersionToolChoice2TypedDict, GetPromptVersionToolChoice1],
|
|
1190
|
+
)
|
|
1191
|
+
r"""Controls which (if any) tool is called by the model."""
|
|
1192
|
+
|
|
1193
|
+
|
|
1194
|
+
GetPromptVersionToolChoice = TypeAliasType(
|
|
1195
|
+
"GetPromptVersionToolChoice",
|
|
1196
|
+
Union[GetPromptVersionToolChoice2, GetPromptVersionToolChoice1],
|
|
1197
|
+
)
|
|
1198
|
+
r"""Controls which (if any) tool is called by the model."""
|
|
1199
|
+
|
|
1200
|
+
|
|
1201
|
+
GetPromptVersionModalities = Literal[
|
|
1202
|
+
"text",
|
|
1203
|
+
"audio",
|
|
1204
|
+
]
|
|
1205
|
+
|
|
1206
|
+
|
|
1207
|
+
GetPromptVersionID1 = Literal[
|
|
1208
|
+
"orq_pii_detection",
|
|
1209
|
+
"orq_sexual_moderation",
|
|
1210
|
+
"orq_harmful_moderation",
|
|
1211
|
+
]
|
|
1212
|
+
r"""The key of the guardrail."""
|
|
1213
|
+
|
|
1214
|
+
|
|
1215
|
+
GetPromptVersionIDTypedDict = TypeAliasType(
|
|
1216
|
+
"GetPromptVersionIDTypedDict", Union[GetPromptVersionID1, str]
|
|
1217
|
+
)
|
|
1218
|
+
|
|
1219
|
+
|
|
1220
|
+
GetPromptVersionID = TypeAliasType(
|
|
1221
|
+
"GetPromptVersionID", Union[GetPromptVersionID1, str]
|
|
1222
|
+
)
|
|
1223
|
+
|
|
1224
|
+
|
|
1225
|
+
GetPromptVersionExecuteOn = Literal[
|
|
1226
|
+
"input",
|
|
1227
|
+
"output",
|
|
1228
|
+
]
|
|
1229
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
1230
|
+
|
|
1231
|
+
|
|
1232
|
+
class GetPromptVersionGuardrailsTypedDict(TypedDict):
|
|
1233
|
+
id: GetPromptVersionIDTypedDict
|
|
1234
|
+
execute_on: GetPromptVersionExecuteOn
|
|
1235
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
1236
|
+
|
|
1237
|
+
|
|
1238
|
+
class GetPromptVersionGuardrails(BaseModel):
|
|
1239
|
+
id: GetPromptVersionID
|
|
1240
|
+
|
|
1241
|
+
execute_on: GetPromptVersionExecuteOn
|
|
1242
|
+
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
1243
|
+
|
|
1244
|
+
|
|
1245
|
+
class GetPromptVersionFallbacksTypedDict(TypedDict):
|
|
1246
|
+
model: str
|
|
1247
|
+
r"""Fallback model identifier"""
|
|
1248
|
+
|
|
1249
|
+
|
|
1250
|
+
class GetPromptVersionFallbacks(BaseModel):
|
|
1251
|
+
model: str
|
|
1252
|
+
r"""Fallback model identifier"""
|
|
1253
|
+
|
|
1254
|
+
|
|
1255
|
+
class GetPromptVersionRetryTypedDict(TypedDict):
|
|
1256
|
+
r"""Retry configuration for the request"""
|
|
1257
|
+
|
|
1258
|
+
count: NotRequired[float]
|
|
1259
|
+
r"""Number of retry attempts (1-5)"""
|
|
1260
|
+
on_codes: NotRequired[List[float]]
|
|
1261
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
1262
|
+
|
|
1263
|
+
|
|
1264
|
+
class GetPromptVersionRetry(BaseModel):
|
|
1265
|
+
r"""Retry configuration for the request"""
|
|
1266
|
+
|
|
1267
|
+
count: Optional[float] = 3
|
|
1268
|
+
r"""Number of retry attempts (1-5)"""
|
|
1269
|
+
|
|
1270
|
+
on_codes: Optional[List[float]] = None
|
|
1271
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
1272
|
+
|
|
1273
|
+
@model_serializer(mode="wrap")
|
|
1274
|
+
def serialize_model(self, handler):
|
|
1275
|
+
optional_fields = set(["count", "on_codes"])
|
|
1276
|
+
serialized = handler(self)
|
|
1277
|
+
m = {}
|
|
1278
|
+
|
|
1279
|
+
for n, f in type(self).model_fields.items():
|
|
1280
|
+
k = f.alias or n
|
|
1281
|
+
val = serialized.get(k)
|
|
1282
|
+
|
|
1283
|
+
if val != UNSET_SENTINEL:
|
|
1284
|
+
if val is not None or k not in optional_fields:
|
|
1285
|
+
m[k] = val
|
|
1286
|
+
|
|
1287
|
+
return m
|
|
1288
|
+
|
|
1289
|
+
|
|
1290
|
+
GetPromptVersionType = Literal["exact_match",]
|
|
1291
|
+
|
|
1292
|
+
|
|
1293
|
+
class GetPromptVersionCacheTypedDict(TypedDict):
|
|
1294
|
+
r"""Cache configuration for the request."""
|
|
1295
|
+
|
|
1296
|
+
type: GetPromptVersionType
|
|
1297
|
+
ttl: NotRequired[float]
|
|
1298
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1299
|
+
|
|
1300
|
+
|
|
1301
|
+
class GetPromptVersionCache(BaseModel):
|
|
1302
|
+
r"""Cache configuration for the request."""
|
|
1303
|
+
|
|
1304
|
+
type: GetPromptVersionType
|
|
1305
|
+
|
|
1306
|
+
ttl: Optional[float] = 1800
|
|
1307
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1308
|
+
|
|
1309
|
+
@model_serializer(mode="wrap")
|
|
1310
|
+
def serialize_model(self, handler):
|
|
1311
|
+
optional_fields = set(["ttl"])
|
|
1312
|
+
serialized = handler(self)
|
|
1313
|
+
m = {}
|
|
1314
|
+
|
|
1315
|
+
for n, f in type(self).model_fields.items():
|
|
1316
|
+
k = f.alias or n
|
|
1317
|
+
val = serialized.get(k)
|
|
1318
|
+
|
|
1319
|
+
if val != UNSET_SENTINEL:
|
|
1320
|
+
if val is not None or k not in optional_fields:
|
|
1321
|
+
m[k] = val
|
|
1322
|
+
|
|
1323
|
+
return m
|
|
1324
|
+
|
|
1325
|
+
|
|
1326
|
+
GetPromptVersionLoadBalancerType = Literal["weight_based",]
|
|
1327
|
+
|
|
1328
|
+
|
|
1329
|
+
class GetPromptVersionLoadBalancerModelsTypedDict(TypedDict):
|
|
1330
|
+
model: str
|
|
1331
|
+
r"""Model identifier for load balancing"""
|
|
1332
|
+
weight: NotRequired[float]
|
|
1333
|
+
r"""Weight assigned to this model for load balancing"""
|
|
1334
|
+
|
|
1335
|
+
|
|
1336
|
+
class GetPromptVersionLoadBalancerModels(BaseModel):
|
|
1337
|
+
model: str
|
|
1338
|
+
r"""Model identifier for load balancing"""
|
|
1339
|
+
|
|
1340
|
+
weight: Optional[float] = 0.5
|
|
1341
|
+
r"""Weight assigned to this model for load balancing"""
|
|
1342
|
+
|
|
1343
|
+
@model_serializer(mode="wrap")
|
|
1344
|
+
def serialize_model(self, handler):
|
|
1345
|
+
optional_fields = set(["weight"])
|
|
1346
|
+
serialized = handler(self)
|
|
1347
|
+
m = {}
|
|
1348
|
+
|
|
1349
|
+
for n, f in type(self).model_fields.items():
|
|
1350
|
+
k = f.alias or n
|
|
1351
|
+
val = serialized.get(k)
|
|
1352
|
+
|
|
1353
|
+
if val != UNSET_SENTINEL:
|
|
1354
|
+
if val is not None or k not in optional_fields:
|
|
1355
|
+
m[k] = val
|
|
1356
|
+
|
|
1357
|
+
return m
|
|
1358
|
+
|
|
1359
|
+
|
|
1360
|
+
class GetPromptVersionLoadBalancer1TypedDict(TypedDict):
|
|
1361
|
+
type: GetPromptVersionLoadBalancerType
|
|
1362
|
+
models: List[GetPromptVersionLoadBalancerModelsTypedDict]
|
|
1363
|
+
|
|
1364
|
+
|
|
1365
|
+
class GetPromptVersionLoadBalancer1(BaseModel):
|
|
1366
|
+
type: GetPromptVersionLoadBalancerType
|
|
1367
|
+
|
|
1368
|
+
models: List[GetPromptVersionLoadBalancerModels]
|
|
1369
|
+
|
|
1370
|
+
|
|
1371
|
+
GetPromptVersionLoadBalancerTypedDict = GetPromptVersionLoadBalancer1TypedDict
|
|
1372
|
+
r"""Load balancer configuration for the request."""
|
|
1373
|
+
|
|
1374
|
+
|
|
1375
|
+
GetPromptVersionLoadBalancer = GetPromptVersionLoadBalancer1
|
|
1376
|
+
r"""Load balancer configuration for the request."""
|
|
1377
|
+
|
|
1378
|
+
|
|
1379
|
+
class GetPromptVersionTimeoutTypedDict(TypedDict):
|
|
1380
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
1381
|
+
|
|
1382
|
+
call_timeout: float
|
|
1383
|
+
r"""Timeout value in milliseconds"""
|
|
1384
|
+
|
|
1385
|
+
+class GetPromptVersionTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+GetPromptVersionMessagesPromptsResponse200Role = Literal["tool",]
+r"""The role of the messages author, in this case tool."""
+
+
+GetPromptVersionContentPromptsResponse2002TypedDict = TextContentPartSchemaTypedDict
+
+
+GetPromptVersionContentPromptsResponse2002 = TextContentPartSchema
+
+
+GetPromptVersionMessagesPromptsResponse200ContentTypedDict = TypeAliasType(
+    "GetPromptVersionMessagesPromptsResponse200ContentTypedDict",
+    Union[str, List[GetPromptVersionContentPromptsResponse2002TypedDict]],
+)
+r"""The contents of the tool message."""
+
+
+GetPromptVersionMessagesPromptsResponse200Content = TypeAliasType(
+    "GetPromptVersionMessagesPromptsResponse200Content",
+    Union[str, List[GetPromptVersionContentPromptsResponse2002]],
+)
+r"""The contents of the tool message."""
+
+
+GetPromptVersionMessagesPromptsType = Literal["ephemeral",]
+r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+
+GetPromptVersionMessagesTTL = Literal[
+    "5m",
+    "1h",
+]
+r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+- `5m`: 5 minutes
+- `1h`: 1 hour
+
+Defaults to `5m`. Only supported by `Anthropic` Claude models.
+"""
+
+
+class GetPromptVersionMessagesCacheControlTypedDict(TypedDict):
+    type: GetPromptVersionMessagesPromptsType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+    ttl: NotRequired[GetPromptVersionMessagesTTL]
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+
+class GetPromptVersionMessagesCacheControl(BaseModel):
+    type: GetPromptVersionMessagesPromptsType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+    ttl: Optional[GetPromptVersionMessagesTTL] = "5m"
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
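The `ttl` default and the unset-field pruning above are easiest to see in use. A minimal sketch, assuming these generated models are re-exported from orq_ai_sdk.models (the import path is an assumption, not something stated in this diff):

# Hedged illustration; the import location is assumed, not taken from this diff.
from orq_ai_sdk.models import GetPromptVersionMessagesCacheControl

block = GetPromptVersionMessagesCacheControl(type="ephemeral")

# ttl falls back to "5m" when not provided, and the wrap serializer keeps it
# because its value is not None.
print(block.ttl)            # expected: 5m
print(block.model_dump())   # expected: {'type': 'ephemeral', 'ttl': '5m'}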
+class GetPromptVersionMessagesToolMessageTypedDict(TypedDict):
+    role: GetPromptVersionMessagesPromptsResponse200Role
+    r"""The role of the messages author, in this case tool."""
+    content: GetPromptVersionMessagesPromptsResponse200ContentTypedDict
+    r"""The contents of the tool message."""
+    tool_call_id: Nullable[str]
+    r"""Tool call that this message is responding to."""
+    cache_control: NotRequired[GetPromptVersionMessagesCacheControlTypedDict]
+
+
+class GetPromptVersionMessagesToolMessage(BaseModel):
+    role: GetPromptVersionMessagesPromptsResponse200Role
+    r"""The role of the messages author, in this case tool."""
+
+    content: GetPromptVersionMessagesPromptsResponse200Content
+    r"""The contents of the tool message."""
+
+    tool_call_id: Nullable[str]
+    r"""Tool call that this message is responding to."""
+
+    cache_control: Optional[GetPromptVersionMessagesCacheControl] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["cache_control"])
+        nullable_fields = set(["tool_call_id"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
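The serializer above distinguishes optional fields that were never set (dropped from the output) from nullable fields that were explicitly set, even to None (kept). A rough sketch of the expected behavior, again assuming the model is importable from orq_ai_sdk.models:

from orq_ai_sdk.models import GetPromptVersionMessagesToolMessage

msg = GetPromptVersionMessagesToolMessage(
    role="tool",
    content="17 degrees and sunny",
    tool_call_id=None,  # nullable and explicitly set, so it survives serialization
)

# cache_control was never set, so the wrap serializer omits it entirely.
print(msg.model_dump())
# expected: {'role': 'tool', 'content': '17 degrees and sunny', 'tool_call_id': None}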
+GetPromptVersionContentPromptsResponse2TypedDict = TypeAliasType(
+    "GetPromptVersionContentPromptsResponse2TypedDict",
+    Union[
+        RefusalPartSchemaTypedDict,
+        RedactedReasoningPartSchemaTypedDict,
+        TextContentPartSchemaTypedDict,
+        ReasoningPartSchemaTypedDict,
+    ],
+)
+
+
+GetPromptVersionContentPromptsResponse2 = Annotated[
+    Union[
+        Annotated[TextContentPartSchema, Tag("text")],
+        Annotated[RefusalPartSchema, Tag("refusal")],
+        Annotated[ReasoningPartSchema, Tag("reasoning")],
+        Annotated[RedactedReasoningPartSchema, Tag("redacted_reasoning")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+
+
+GetPromptVersionMessagesPromptsResponseContentTypedDict = TypeAliasType(
+    "GetPromptVersionMessagesPromptsResponseContentTypedDict",
+    Union[str, List[GetPromptVersionContentPromptsResponse2TypedDict]],
+)
+r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+
+GetPromptVersionMessagesPromptsResponseContent = TypeAliasType(
+    "GetPromptVersionMessagesPromptsResponseContent",
+    Union[str, List[GetPromptVersionContentPromptsResponse2]],
+)
+r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+
+GetPromptVersionMessagesPromptsResponseRole = Literal["assistant",]
+r"""The role of the messages author, in this case `assistant`."""
+
+
+class GetPromptVersionMessagesAudioTypedDict(TypedDict):
+    r"""Data about a previous audio response from the model."""
+
+    id: str
+    r"""Unique identifier for a previous audio response from the model."""
+
+
+class GetPromptVersionMessagesAudio(BaseModel):
+    r"""Data about a previous audio response from the model."""
+
+    id: str
+    r"""Unique identifier for a previous audio response from the model."""
+
+
+GetPromptVersionMessagesType = Literal["function",]
+r"""The type of the tool. Currently, only `function` is supported."""
+
+
+class GetPromptVersionMessagesFunctionTypedDict(TypedDict):
+    name: NotRequired[str]
+    r"""The name of the function to call."""
+    arguments: NotRequired[str]
+    r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
+
+
+class GetPromptVersionMessagesFunction(BaseModel):
+    name: Optional[str] = None
+    r"""The name of the function to call."""
+
+    arguments: Optional[str] = None
+    r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name", "arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class GetPromptVersionMessagesToolCallsTypedDict(TypedDict):
+    id: str
+    r"""The ID of the tool call."""
+    type: GetPromptVersionMessagesType
+    r"""The type of the tool. Currently, only `function` is supported."""
+    function: GetPromptVersionMessagesFunctionTypedDict
+    thought_signature: NotRequired[str]
+    r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
+
+
+class GetPromptVersionMessagesToolCalls(BaseModel):
+    id: str
+    r"""The ID of the tool call."""
+
+    type: GetPromptVersionMessagesType
+    r"""The type of the tool. Currently, only `function` is supported."""
+
+    function: GetPromptVersionMessagesFunction
+
+    thought_signature: Optional[str] = None
+    r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["thought_signature"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
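Because `arguments` is a model-generated JSON string rather than a parsed object, the docstring's advice to validate it before calling your function translates into something like the following sketch (the call id, function name, and arguments are made up for illustration, and the import path is assumed):

import json

from orq_ai_sdk.models import (
    GetPromptVersionMessagesFunction,
    GetPromptVersionMessagesToolCalls,
)

call = GetPromptVersionMessagesToolCalls(
    id="call_0001",  # hypothetical tool call id
    type="function",
    function=GetPromptVersionMessagesFunction(
        name="get_weather",
        arguments='{"city": "Amsterdam"}',
    ),
)

# The model may emit invalid JSON or unexpected keys, so parse defensively.
try:
    args = json.loads(call.function.arguments or "{}")
except json.JSONDecodeError:
    args = {}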
+class GetPromptVersionMessagesAssistantMessageTypedDict(TypedDict):
+    role: GetPromptVersionMessagesPromptsResponseRole
+    r"""The role of the messages author, in this case `assistant`."""
+    content: NotRequired[
+        Nullable[GetPromptVersionMessagesPromptsResponseContentTypedDict]
+    ]
+    r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+    refusal: NotRequired[Nullable[str]]
+    r"""The refusal message by the assistant."""
+    name: NotRequired[str]
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+    audio: NotRequired[Nullable[GetPromptVersionMessagesAudioTypedDict]]
+    r"""Data about a previous audio response from the model."""
+    tool_calls: NotRequired[List[GetPromptVersionMessagesToolCallsTypedDict]]
+    r"""The tool calls generated by the model, such as function calls."""
+
+
+class GetPromptVersionMessagesAssistantMessage(BaseModel):
+    role: GetPromptVersionMessagesPromptsResponseRole
+    r"""The role of the messages author, in this case `assistant`."""
+
+    content: OptionalNullable[GetPromptVersionMessagesPromptsResponseContent] = UNSET
+    r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+    refusal: OptionalNullable[str] = UNSET
+    r"""The refusal message by the assistant."""
+
+    name: Optional[str] = None
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+    audio: OptionalNullable[GetPromptVersionMessagesAudio] = UNSET
+    r"""Data about a previous audio response from the model."""
+
+    tool_calls: Optional[List[GetPromptVersionMessagesToolCalls]] = None
+    r"""The tool calls generated by the model, such as function calls."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
+        nullable_fields = set(["content", "refusal", "audio"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+GetPromptVersionMessagesPromptsRole = Literal["user",]
+r"""The role of the messages author, in this case `user`."""
+
+
+GetPromptVersion2PromptsResponse200Type = Literal["file",]
+r"""The type of the content part. Always `file`."""
+
+
+GetPromptVersion2PromptsResponse200ApplicationJSONType = Literal["ephemeral",]
+r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+
+GetPromptVersion2TTL = Literal[
+    "5m",
+    "1h",
+]
+r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+- `5m`: 5 minutes
+- `1h`: 1 hour
+
+Defaults to `5m`. Only supported by `Anthropic` Claude models.
+"""
+
+
+class GetPromptVersion2CacheControlTypedDict(TypedDict):
+    type: GetPromptVersion2PromptsResponse200ApplicationJSONType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+    ttl: NotRequired[GetPromptVersion2TTL]
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+
+class GetPromptVersion2CacheControl(BaseModel):
+    type: GetPromptVersion2PromptsResponse200ApplicationJSONType
+    r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+    ttl: Optional[GetPromptVersion2TTL] = "5m"
+    r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+    - `5m`: 5 minutes
+    - `1h`: 1 hour
+
+    Defaults to `5m`. Only supported by `Anthropic` Claude models.
+    """
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class GetPromptVersion24TypedDict(TypedDict):
+    type: GetPromptVersion2PromptsResponse200Type
+    r"""The type of the content part. Always `file`."""
+    file: FileContentPartSchemaTypedDict
+    r"""File data for the content part. Must contain either file_data or uri, but not both."""
+    cache_control: NotRequired[GetPromptVersion2CacheControlTypedDict]
+
+
+class GetPromptVersion24(BaseModel):
+    type: GetPromptVersion2PromptsResponse200Type
+    r"""The type of the content part. Always `file`."""
+
+    file: FileContentPartSchema
+    r"""File data for the content part. Must contain either file_data or uri, but not both."""
+
+    cache_control: Optional[GetPromptVersion2CacheControl] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["cache_control"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+GetPromptVersionContentPrompts2TypedDict = TypeAliasType(
+    "GetPromptVersionContentPrompts2TypedDict",
+    Union[
+        AudioContentPartSchemaTypedDict,
+        TextContentPartSchemaTypedDict,
+        ImageContentPartSchemaTypedDict,
+        GetPromptVersion24TypedDict,
+    ],
+)
+
+
+GetPromptVersionContentPrompts2 = Annotated[
+    Union[
+        Annotated[TextContentPartSchema, Tag("text")],
+        Annotated[ImageContentPartSchema, Tag("image_url")],
+        Annotated[AudioContentPartSchema, Tag("input_audio")],
+        Annotated[GetPromptVersion24, Tag("file")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+
+
+GetPromptVersionMessagesPromptsContentTypedDict = TypeAliasType(
+    "GetPromptVersionMessagesPromptsContentTypedDict",
+    Union[str, List[GetPromptVersionContentPrompts2TypedDict]],
+)
+r"""The contents of the user message."""
+
+
+GetPromptVersionMessagesPromptsContent = TypeAliasType(
+    "GetPromptVersionMessagesPromptsContent",
+    Union[str, List[GetPromptVersionContentPrompts2]],
+)
+r"""The contents of the user message."""
+
+
+class GetPromptVersionMessagesUserMessageTypedDict(TypedDict):
+    role: GetPromptVersionMessagesPromptsRole
+    r"""The role of the messages author, in this case `user`."""
+    content: GetPromptVersionMessagesPromptsContentTypedDict
+    r"""The contents of the user message."""
+    name: NotRequired[str]
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+
+class GetPromptVersionMessagesUserMessage(BaseModel):
+    role: GetPromptVersionMessagesPromptsRole
+    r"""The role of the messages author, in this case `user`."""
+
+    content: GetPromptVersionMessagesPromptsContent
+    r"""The contents of the user message."""
+
+    name: Optional[str] = None
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+GetPromptVersionMessagesRole = Literal["system",]
+r"""The role of the messages author, in this case `system`."""
+
+
+GetPromptVersionMessagesContentTypedDict = TypeAliasType(
+    "GetPromptVersionMessagesContentTypedDict",
+    Union[str, List[TextContentPartSchemaTypedDict]],
+)
+r"""The contents of the system message."""
+
+
+GetPromptVersionMessagesContent = TypeAliasType(
+    "GetPromptVersionMessagesContent", Union[str, List[TextContentPartSchema]]
+)
+r"""The contents of the system message."""
+
+
+class GetPromptVersionMessagesSystemMessageTypedDict(TypedDict):
+    r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
+
+    role: GetPromptVersionMessagesRole
+    r"""The role of the messages author, in this case `system`."""
+    content: GetPromptVersionMessagesContentTypedDict
+    r"""The contents of the system message."""
+    name: NotRequired[str]
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+
+class GetPromptVersionMessagesSystemMessage(BaseModel):
+    r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
+
+    role: GetPromptVersionMessagesRole
+    r"""The role of the messages author, in this case `system`."""
+
+    content: GetPromptVersionMessagesContent
+    r"""The contents of the system message."""
+
+    name: Optional[str] = None
+    r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+GetPromptVersionPromptsMessagesTypedDict = TypeAliasType(
+    "GetPromptVersionPromptsMessagesTypedDict",
+    Union[
+        GetPromptVersionMessagesSystemMessageTypedDict,
+        GetPromptVersionMessagesUserMessageTypedDict,
+        GetPromptVersionMessagesToolMessageTypedDict,
+        GetPromptVersionMessagesAssistantMessageTypedDict,
+    ],
+)
+
+
+GetPromptVersionPromptsMessages = Annotated[
+    Union[
+        Annotated[GetPromptVersionMessagesSystemMessage, Tag("system")],
+        Annotated[GetPromptVersionMessagesUserMessage, Tag("user")],
+        Annotated[GetPromptVersionMessagesAssistantMessage, Tag("assistant")],
+        Annotated[GetPromptVersionMessagesToolMessage, Tag("tool")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]
+
+
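The GetPromptVersionPromptsMessages union above dispatches on the `role` field, so a plain dict can be validated into the matching message class. A hedged sketch using Pydantic's TypeAdapter (the import path for the union is an assumption):

from pydantic import TypeAdapter

from orq_ai_sdk.models import GetPromptVersionPromptsMessages

adapter = TypeAdapter(GetPromptVersionPromptsMessages)

msg = adapter.validate_python(
    {"role": "user", "content": "Summarize the attached report."}
)

# The "user" tag routes the dict to GetPromptVersionMessagesUserMessage.
print(type(msg).__name__)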
+class GetPromptVersionPromptFieldTypedDict(TypedDict):
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+    audio: NotRequired[Nullable[GetPromptVersionAudioTypedDict]]
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+    frequency_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+    max_tokens: NotRequired[Nullable[int]]
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+    max_completion_tokens: NotRequired[Nullable[int]]
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+    logprobs: NotRequired[Nullable[bool]]
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+    top_logprobs: NotRequired[Nullable[int]]
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+    n: NotRequired[Nullable[int]]
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+    presence_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+    response_format: NotRequired[GetPromptVersionResponseFormatTypedDict]
+    r"""An object specifying the format that the model must output"""
+    reasoning_effort: NotRequired[GetPromptVersionReasoningEffort]
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+    verbosity: NotRequired[str]
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+    seed: NotRequired[Nullable[float]]
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+    stop: NotRequired[Nullable[GetPromptVersionStopTypedDict]]
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+    stream_options: NotRequired[Nullable[GetPromptVersionStreamOptionsTypedDict]]
+    r"""Options for streaming response. Only set this when you set stream: true."""
+    thinking: NotRequired[GetPromptVersionThinkingTypedDict]
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+    top_p: NotRequired[Nullable[float]]
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+    top_k: NotRequired[Nullable[float]]
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+    tool_choice: NotRequired[GetPromptVersionToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model."""
+    parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use."""
+    modalities: NotRequired[Nullable[List[GetPromptVersionModalities]]]
+    r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+    guardrails: NotRequired[List[GetPromptVersionGuardrailsTypedDict]]
+    r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[GetPromptVersionFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[GetPromptVersionRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[GetPromptVersionCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[GetPromptVersionLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[GetPromptVersionTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+    messages: NotRequired[List[GetPromptVersionPromptsMessagesTypedDict]]
+    r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+    model: NotRequired[Nullable[str]]
+    r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
+    version: NotRequired[str]
+
+
+class GetPromptVersionPromptField(BaseModel):
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+    audio: OptionalNullable[GetPromptVersionAudio] = UNSET
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+
+    frequency_penalty: OptionalNullable[float] = UNSET
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+
+    max_tokens: OptionalNullable[int] = UNSET
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+
+    max_completion_tokens: OptionalNullable[int] = UNSET
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+
+    logprobs: OptionalNullable[bool] = UNSET
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+
+    top_logprobs: OptionalNullable[int] = UNSET
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+
+    n: OptionalNullable[int] = UNSET
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+
+    presence_penalty: OptionalNullable[float] = UNSET
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+
+    response_format: Optional[GetPromptVersionResponseFormat] = None
+    r"""An object specifying the format that the model must output"""
+
+    reasoning_effort: Optional[GetPromptVersionReasoningEffort] = None
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+
+    verbosity: Optional[str] = None
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+
+    seed: OptionalNullable[float] = UNSET
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+
+    stop: OptionalNullable[GetPromptVersionStop] = UNSET
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+
+    stream_options: OptionalNullable[GetPromptVersionStreamOptions] = UNSET
+    r"""Options for streaming response. Only set this when you set stream: true."""
+
+    thinking: Optional[GetPromptVersionThinking] = None
+
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+
+    top_p: OptionalNullable[float] = UNSET
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+
+    top_k: OptionalNullable[float] = UNSET
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+
+    tool_choice: Optional[GetPromptVersionToolChoice] = None
+    r"""Controls which (if any) tool is called by the model."""
+
+    parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use."""
+
+    modalities: OptionalNullable[List[GetPromptVersionModalities]] = UNSET
+    r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+
+    guardrails: Optional[List[GetPromptVersionGuardrails]] = None
+    r"""A list of guardrails to apply to the request."""
+
+    fallbacks: Optional[List[GetPromptVersionFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[GetPromptVersionRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[GetPromptVersionCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[GetPromptVersionLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[GetPromptVersionTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    messages: Optional[List[GetPromptVersionPromptsMessages]] = None
+    r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+
+    model: OptionalNullable[str] = UNSET
+    r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
+
+    version: Optional[str] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+                "messages",
+                "model",
+                "version",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+                "model",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
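Taken together, GetPromptVersionPromptField mirrors an OpenAI-style chat configuration: a model identifier, a message list, sampling parameters, and orq-specific routing options such as `timeout` (in milliseconds). A minimal construction sketch, with the model name and timeout value chosen only for illustration and the import path assumed:

from orq_ai_sdk.models import (
    GetPromptVersionMessagesSystemMessage,
    GetPromptVersionMessagesUserMessage,
    GetPromptVersionPromptField,
    GetPromptVersionTimeout,
)

prompt = GetPromptVersionPromptField(
    model="openai/gpt-4o",
    temperature=0.2,
    timeout=GetPromptVersionTimeout(call_timeout=30000),  # milliseconds, per the field docstring
    messages=[
        GetPromptVersionMessagesSystemMessage(
            role="system", content="You are a concise assistant."
        ),
        GetPromptVersionMessagesUserMessage(
            role="user", content="Translate 'hello' to Dutch."
        ),
    ],
)

# Unset optional fields are pruned by the wrap serializer, so only the
# populated keys appear in the serialized payload.
print(prompt.model_dump())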
+GetPromptVersionUseCases = Literal[
+    "Agents simulations",
+    "Agents",
+    "API interaction",
+    "Autonomous Agents",
+    "Chatbots",
+    "Classification",
+    "Code understanding",
+    "Code writing",
+    "Conversation",
+    "Documents QA",
+    "Evaluation",
+    "Extraction",
+    "Multi-modal",
+    "Self-checking",
+    "Sentiment analysis",
+    "SQL",
+    "Summarization",
+    "Tagging",
+    "Translation (document)",
+    "Translation (sentences)",
+]
+
+
+GetPromptVersionLanguage = Literal[
+    "Chinese",
+    "Dutch",
+    "English",
+    "French",
+    "German",
+    "Russian",
+    "Spanish",
+]
+r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+
+class GetPromptVersionMetadataTypedDict(TypedDict):
+    use_cases: NotRequired[List[GetPromptVersionUseCases]]
+    r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
+    language: NotRequired[Nullable[GetPromptVersionLanguage]]
+    r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+
+class GetPromptVersionMetadata(BaseModel):
+    use_cases: Optional[List[GetPromptVersionUseCases]] = None
+    r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
+
+    language: OptionalNullable[GetPromptVersionLanguage] = UNSET
+    r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["use_cases", "language"])
+        nullable_fields = set(["language"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+class GetPromptVersionResponseBodyTypedDict(TypedDict):
+    r"""Prompt version retrieved successfully."""
+
+    id: str
+    prompt: GetPromptVersionPromptFieldTypedDict
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+    timestamp: str
+    created_by_id: NotRequired[Nullable[str]]
+    updated_by_id: NotRequired[Nullable[str]]
+    description: NotRequired[Nullable[str]]
+    r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
+    prompt_config: NotRequired[GetPromptVersionPromptConfigTypedDict]
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
+    metadata: NotRequired[GetPromptVersionMetadataTypedDict]
+
+
+class GetPromptVersionResponseBody(BaseModel):
+    r"""Prompt version retrieved successfully."""
+
+    id: Annotated[str, pydantic.Field(alias="_id")]
+
+    prompt: GetPromptVersionPromptField
+    r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+    timestamp: str
+
+    created_by_id: OptionalNullable[str] = UNSET
+
+    updated_by_id: OptionalNullable[str] = UNSET
+
+    description: OptionalNullable[str] = UNSET
+    r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
+
+    prompt_config: Annotated[
+        Optional[GetPromptVersionPromptConfig],
+        pydantic.Field(
+            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+        ),
+    ] = None
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
+
+    metadata: Optional[GetPromptVersionMetadata] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "created_by_id",
+                "updated_by_id",
+                "description",
+                "prompt_config",
+                "metadata",
+            ]
+        )
+        nullable_fields = set(["created_by_id", "updated_by_id", "description"])
+        serialized = handler(self)
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-
-
-
-
-
-
-
-
-
-
-
-
-                ):
-                    m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m
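Finally, note that GetPromptVersionResponseBody maps the wire-level "_id" key onto the `id` attribute via a Pydantic field alias. A small validation sketch with made-up values (import path and identifier are assumptions for illustration):

from orq_ai_sdk.models import GetPromptVersionResponseBody

body = GetPromptVersionResponseBody.model_validate(
    {
        "_id": "01JEXAMPLEVERSION",  # hypothetical version identifier
        "timestamp": "2025-01-01T00:00:00Z",
        "prompt": {"model": "openai/gpt-4o", "messages": []},
    }
)

print(body.id)        # expected: 01JEXAMPLEVERSION
print(body.metadata)  # expected: None (optional and never set)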