orq-ai-sdk 4.2.0rc49__py3-none-any.whl → 4.2.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/agents.py +186 -186
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -886
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +3839 -424
- orq_ai_sdk/models/conversationresponse.py +1 -1
- orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
- orq_ai_sdk/models/createagentrequestop.py +768 -12
- orq_ai_sdk/models/createagentresponse.py +68 -2
- orq_ai_sdk/models/createchatcompletionop.py +538 -313
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +5 -10
- orq_ai_sdk/models/createconversationop.py +1 -1
- orq_ai_sdk/models/createconversationresponseop.py +2 -2
- orq_ai_sdk/models/createdatasetitemop.py +4 -4
- orq_ai_sdk/models/createdatasetop.py +1 -1
- orq_ai_sdk/models/createdatasourceop.py +1 -1
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +14 -14
- orq_ai_sdk/models/createidentityop.py +1 -1
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +228 -82
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/creatememoryop.py +4 -2
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +375 -6
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +6 -6
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/deploymentgetconfigop.py +17 -7
- orq_ai_sdk/models/deploymentsop.py +1 -0
- orq_ai_sdk/models/deploymentstreamop.py +7 -0
- orq_ai_sdk/models/filegetop.py +1 -1
- orq_ai_sdk/models/filelistop.py +1 -1
- orq_ai_sdk/models/fileuploadop.py +1 -1
- orq_ai_sdk/models/generateconversationnameop.py +1 -1
- orq_ai_sdk/models/getallmemoriesop.py +4 -2
- orq_ai_sdk/models/getallpromptsop.py +188 -3
- orq_ai_sdk/models/getalltoolsop.py +6 -6
- orq_ai_sdk/models/getevalsop.py +17 -17
- orq_ai_sdk/models/getonepromptop.py +188 -3
- orq_ai_sdk/models/getpromptversionop.py +188 -3
- orq_ai_sdk/models/invokedeploymentrequest.py +11 -4
- orq_ai_sdk/models/listagentsop.py +372 -0
- orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
- orq_ai_sdk/models/listdatasetsop.py +1 -1
- orq_ai_sdk/models/listdatasourcesop.py +1 -1
- orq_ai_sdk/models/listidentitiesop.py +1 -1
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +188 -3
- orq_ai_sdk/models/partdoneevent.py +1 -1
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +9 -3
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +1 -1
- orq_ai_sdk/models/responsedoneevent.py +14 -11
- orq_ai_sdk/models/retrieveagentrequestop.py +382 -0
- orq_ai_sdk/models/retrievedatapointop.py +4 -4
- orq_ai_sdk/models/retrievedatasetop.py +1 -1
- orq_ai_sdk/models/retrievedatasourceop.py +1 -1
- orq_ai_sdk/models/retrieveidentityop.py +1 -1
- orq_ai_sdk/models/retrievememoryop.py +4 -2
- orq_ai_sdk/models/retrievetoolop.py +6 -6
- orq_ai_sdk/models/runagentop.py +379 -9
- orq_ai_sdk/models/streamrunagentop.py +385 -9
- orq_ai_sdk/models/updateagentop.py +770 -12
- orq_ai_sdk/models/updateconversationop.py +1 -1
- orq_ai_sdk/models/updatedatapointop.py +4 -4
- orq_ai_sdk/models/updatedatasetop.py +1 -1
- orq_ai_sdk/models/updatedatasourceop.py +1 -1
- orq_ai_sdk/models/updateevalop.py +14 -14
- orq_ai_sdk/models/updateidentityop.py +1 -1
- orq_ai_sdk/models/updatememoryop.py +4 -2
- orq_ai_sdk/models/updatepromptop.py +375 -6
- orq_ai_sdk/models/updatetoolop.py +7 -7
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/variations.py +364 -0
- orq_ai_sdk-4.2.15.dist-info/METADATA +888 -0
- {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/RECORD +99 -76
- {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/WHEEL +1 -1
- orq_ai_sdk/models/deletecontactop.py +0 -44
- orq_ai_sdk/models/listcontactsop.py +0 -265
- orq_ai_sdk/models/retrievecontactop.py +0 -142
- orq_ai_sdk/models/updatecontactop.py +0 -233
- orq_ai_sdk-4.2.0rc49.dist-info/METADATA +0 -788
- {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/top_level.txt +0 -0
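The headline change for 4.2.15 is a set of gateway-reliability options on prompt configurations (`fallbacks`, `retry`, `cache`, `load_balancer`, `timeout`), a new `name` field for trace display, and a batch of new router surfaces (completions, embeddings, rerank, speech, transcriptions, translations, moderations, OCR). The hunks below cover `orq_ai_sdk/models/updatepromptop.py` and `orq_ai_sdk/models/updatetoolop.py`. As orientation, a plain dict matching the new TypedDict shapes might look like the following sketch; key names and constraints are taken from this diff, and the client call that would consume the dict is omitted because it is not part of the diff:

```python
# Sketch of the new reliability options on prompt configs. Shapes follow the
# TypedDicts added in updatepromptop.py below; values are illustrative only.
prompt_options = {
    "fallbacks": [{"model": "openai/gpt-4o-mini"}],            # tried if the primary model fails
    "retry": {"count": 3, "on_codes": [429, 500, 502, 503]},   # 1-5 attempts on these HTTP codes
    "cache": {"type": "exact_match", "ttl": 1800},             # TTL in seconds, max 259200 (3 days)
    "load_balancer": {
        "type": "weight_based",
        "models": [
            {"model": "openai/gpt-4o", "weight": 0.7},
            {"model": "anthropic/claude-3-5-sonnet-20241022", "weight": 0.3},
        ],
    },
    "timeout": {"call_timeout": 30_000},                       # milliseconds
}
```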
orq_ai_sdk/models/updatepromptop.py

```diff
@@ -973,6 +973,154 @@ class UpdatePromptGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
+class UpdatePromptFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class UpdatePromptFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class UpdatePromptRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class UpdatePromptRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+UpdatePromptType = Literal["exact_match",]
+
+
+class UpdatePromptCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: UpdatePromptType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class UpdatePromptCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: UpdatePromptType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+UpdatePromptLoadBalancerType = Literal["weight_based",]
+
+
+class UpdatePromptLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class UpdatePromptLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class UpdatePromptLoadBalancer1TypedDict(TypedDict):
+    type: UpdatePromptLoadBalancerType
+    models: List[UpdatePromptLoadBalancerModelsTypedDict]
+
+
+class UpdatePromptLoadBalancer1(BaseModel):
+    type: UpdatePromptLoadBalancerType
+
+    models: List[UpdatePromptLoadBalancerModels]
+
+
+UpdatePromptLoadBalancerTypedDict = UpdatePromptLoadBalancer1TypedDict
+r"""Load balancer configuration for the request."""
+
+
+UpdatePromptLoadBalancer = UpdatePromptLoadBalancer1
+r"""Load balancer configuration for the request."""
+
+
+class UpdatePromptTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class UpdatePromptTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 class UpdatePromptPromptInputTypedDict(TypedDict):
     r"""Prompt configuration with model and messages. Use this to update the prompt."""
 
```
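Every new config model above carries the same wrap-mode serializer, which drops optional fields that are still `None` so that partial configs do not serialize explicit nulls. A minimal standalone repro of that pattern with plain pydantic; `UNSET_SENTINEL` here is a stand-in object, since the SDK ships its own sentinel:

```python
from typing import List, Optional

from pydantic import BaseModel, model_serializer

UNSET_SENTINEL = object()  # stand-in; the SDK defines its own sentinel value


class Retry(BaseModel):
    count: Optional[float] = 3
    on_codes: Optional[List[float]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["count", "on_codes"])
        serialized = handler(self)  # default pydantic serialization
        m = {}
        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            if val != UNSET_SENTINEL:
                # optional fields left at None are omitted from the output
                if val is not None or k not in optional_fields:
                    m[k] = val
        return m


print(Retry().model_dump())                     # {'count': 3}
print(Retry(on_codes=[429, 503]).model_dump())  # {'count': 3, 'on_codes': [429.0, 503.0]}
```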
```diff
@@ -980,6 +1128,8 @@ class UpdatePromptPromptInputTypedDict(TypedDict):
     r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
     model: NotRequired[Nullable[str]]
     r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`. The full list of models can be found at https://docs.orq.ai/docs/ai-gateway-supported-models. Only chat models are supported."""
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[UpdatePromptAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -1034,6 +1184,16 @@ class UpdatePromptPromptInputTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[UpdatePromptGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[UpdatePromptFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[UpdatePromptRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[UpdatePromptCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[UpdatePromptLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[UpdatePromptTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
 class UpdatePromptPromptInput(BaseModel):
@@ -1045,6 +1205,9 @@ class UpdatePromptPromptInput(BaseModel):
     model: OptionalNullable[str] = UNSET
     r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`. The full list of models can be found at https://docs.orq.ai/docs/ai-gateway-supported-models. Only chat models are supported."""
 
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[UpdatePromptAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
 
@@ -1121,12 +1284,28 @@ class UpdatePromptPromptInput(BaseModel):
     guardrails: Optional[List[UpdatePromptGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[UpdatePromptFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[UpdatePromptRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[UpdatePromptCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[UpdatePromptLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[UpdatePromptTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = set(
             [
                 "messages",
                 "model",
+                "name",
                 "audio",
                 "frequency_penalty",
                 "max_tokens",
@@ -1149,6 +1328,11 @@ class UpdatePromptPromptInput(BaseModel):
                 "parallel_tool_calls",
                 "modalities",
                 "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
             ]
         )
         nullable_fields = set(
@@ -1420,7 +1604,7 @@ class UpdatePromptResponseBody(OrqError):
         object.__setattr__(self, "data", data)
 
 
-
+UpdatePromptPromptsType = Literal["prompt",]
 
 
 UpdatePromptModelType = Literal[
@@ -1431,6 +1615,7 @@ UpdatePromptModelType = Literal[
     "tts",
     "stt",
     "rerank",
+    "ocr",
     "moderation",
     "vision",
 ]
@@ -2038,7 +2223,7 @@ UpdatePromptContent = TypeAliasType(
     r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
 
 
-
+UpdatePromptPromptsResponse200Type = Literal["function",]
 
 
 class UpdatePromptFunctionTypedDict(TypedDict):
@@ -2055,14 +2240,14 @@ class UpdatePromptFunction(BaseModel):
 
 
 class UpdatePromptToolCallsTypedDict(TypedDict):
-    type:
+    type: UpdatePromptPromptsResponse200Type
     function: UpdatePromptFunctionTypedDict
     id: NotRequired[str]
     index: NotRequired[float]
 
 
 class UpdatePromptToolCalls(BaseModel):
-    type:
+    type: UpdatePromptPromptsResponse200Type
 
     function: UpdatePromptFunction
 
```
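For the cache config above, `type` is the required discriminator (only `"exact_match"` exists so far) and `ttl` defaults to 1800 seconds. A hedged usage sketch follows; importing these names from `orq_ai_sdk.models` assumes the package re-exports them, which the large `models/__init__.py` change in the file list suggests but this diff does not show directly:

```python
from orq_ai_sdk.models import UpdatePromptCache  # re-export assumed, see note above

cache = UpdatePromptCache(type="exact_match")
print(cache.ttl)           # 1800, i.e. 30 minutes; the documented ceiling is 259200 s (3 days)
print(cache.model_dump())  # {'type': 'exact_match', 'ttl': 1800}
```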
```diff
@@ -2580,6 +2765,154 @@ class UpdatePromptPromptsGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
+class UpdatePromptPromptsFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class UpdatePromptPromptsFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class UpdatePromptPromptsRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class UpdatePromptPromptsRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+UpdatePromptPromptsResponseType = Literal["exact_match",]
+
+
+class UpdatePromptPromptsCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: UpdatePromptPromptsResponseType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class UpdatePromptPromptsCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: UpdatePromptPromptsResponseType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+UpdatePromptLoadBalancerPromptsType = Literal["weight_based",]
+
+
+class UpdatePromptLoadBalancerPromptsModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class UpdatePromptLoadBalancerPromptsModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class UpdatePromptLoadBalancerPrompts1TypedDict(TypedDict):
+    type: UpdatePromptLoadBalancerPromptsType
+    models: List[UpdatePromptLoadBalancerPromptsModelsTypedDict]
+
+
+class UpdatePromptLoadBalancerPrompts1(BaseModel):
+    type: UpdatePromptLoadBalancerPromptsType
+
+    models: List[UpdatePromptLoadBalancerPromptsModels]
+
+
+UpdatePromptPromptsLoadBalancerTypedDict = UpdatePromptLoadBalancerPrompts1TypedDict
+r"""Load balancer configuration for the request."""
+
+
+UpdatePromptPromptsLoadBalancer = UpdatePromptLoadBalancerPrompts1
+r"""Load balancer configuration for the request."""
+
+
+class UpdatePromptPromptsTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class UpdatePromptPromptsTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 UpdatePromptMessagesPromptsResponse200ApplicationJSONResponseBodyRole = Literal["tool",]
 r"""The role of the messages author, in this case tool."""
 
@@ -3176,6 +3509,8 @@ UpdatePromptPromptsResponseMessages = Annotated[
 class UpdatePromptPromptFieldTypedDict(TypedDict):
     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
 
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[UpdatePromptPromptsAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -3230,6 +3565,16 @@ class UpdatePromptPromptFieldTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[UpdatePromptPromptsGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[UpdatePromptPromptsFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[UpdatePromptPromptsRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[UpdatePromptPromptsCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[UpdatePromptPromptsLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[UpdatePromptPromptsTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
     messages: NotRequired[List[UpdatePromptPromptsResponseMessagesTypedDict]]
     r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
     model: NotRequired[Nullable[str]]
@@ -3240,6 +3585,9 @@ class UpdatePromptPromptFieldTypedDict(TypedDict):
 class UpdatePromptPromptField(BaseModel):
     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
 
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[UpdatePromptPromptsAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
 
@@ -3316,6 +3664,21 @@ class UpdatePromptPromptField(BaseModel):
     guardrails: Optional[List[UpdatePromptPromptsGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[UpdatePromptPromptsFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[UpdatePromptPromptsRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[UpdatePromptPromptsCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[UpdatePromptPromptsLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[UpdatePromptPromptsTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     messages: Optional[List[UpdatePromptPromptsResponseMessages]] = None
     r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
 
@@ -3328,6 +3691,7 @@ class UpdatePromptPromptField(BaseModel):
     def serialize_model(self, handler):
         optional_fields = set(
             [
+                "name",
                 "audio",
                 "frequency_penalty",
                 "max_tokens",
@@ -3350,6 +3714,11 @@ class UpdatePromptPromptField(BaseModel):
                 "parallel_tool_calls",
                 "modalities",
                 "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
                 "messages",
                 "model",
                 "version",
@@ -3477,7 +3846,7 @@ class UpdatePromptPromptTypedDict(TypedDict):
     r"""A prompt entity with configuration, metadata, and versioning."""
 
     id: str
-    type:
+    type: UpdatePromptPromptsType
     owner: str
     domain_id: str
     created: str
@@ -3500,7 +3869,7 @@ class UpdatePromptPrompt(BaseModel):
 
     id: Annotated[str, pydantic.Field(alias="_id")]
 
-    type:
+    type: UpdatePromptPromptsType
 
     owner: str
 
```
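The load-balancer variant follows the same pattern in both halves of the file: a `weight_based` discriminator plus a list of weighted models, with `weight` defaulting to 0.5 when omitted. Another hedged sketch under the same re-export assumption as above:

```python
from orq_ai_sdk.models import (  # re-exports assumed, as in the earlier sketch
    UpdatePromptLoadBalancer1,
    UpdatePromptLoadBalancerModels,
)

lb = UpdatePromptLoadBalancer1(
    type="weight_based",
    models=[
        UpdatePromptLoadBalancerModels(model="openai/gpt-4o", weight=0.7),
        # weight omitted, so it falls back to the 0.5 default
        UpdatePromptLoadBalancerModels(model="anthropic/claude-3-5-sonnet-20241022"),
    ],
)
print(lb.model_dump())
```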
orq_ai_sdk/models/updatetoolop.py

```diff
@@ -259,7 +259,7 @@ class RequestBodyTools(BaseModel):
 
     schema_: Annotated[UpdateToolRequestBodyToolsSchema, pydantic.Field(alias="schema")]
 
-    id: Optional[str] = "
+    id: Optional[str] = "01KG2GN22V3X563SZMVFHPYQF8"
 
     description: Optional[str] = None
 
@@ -1214,7 +1214,7 @@ class UpdateToolResponseBodyCodeExecutionTool(BaseModel):
     code_tool: UpdateToolResponseBodyCodeTool
 
     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KG2GN22N7KZ8Q1C8QMMEGGMF"
     )
 
     display_name: Optional[str] = None
@@ -1345,7 +1345,7 @@ class UpdateToolResponseBodyTools(BaseModel):
         UpdateToolResponseBodyToolsSchema, pydantic.Field(alias="schema")
     ]
 
-    id: Optional[str] = "
+    id: Optional[str] = "01KG2GN22MH45GSTAYXXG3009H"
 
     description: Optional[str] = None
 
@@ -1476,7 +1476,7 @@ class UpdateToolResponseBodyMCPTool(BaseModel):
     mcp: UpdateToolResponseBodyMcp
 
     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KG2GN22KWGHKBXE19Z1RAWMH"
     )
 
     display_name: Optional[str] = None
@@ -1777,7 +1777,7 @@ class UpdateToolResponseBodyHTTPTool(BaseModel):
     http: UpdateToolResponseBodyHTTP
 
     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KG2GN22H6HDWZGXMAPJ70XQD"
     )
 
     display_name: Optional[str] = None
@@ -1972,7 +1972,7 @@ class UpdateToolResponseBodyJSONSchemaTool(BaseModel):
     json_schema: UpdateToolResponseBodyJSONSchema
 
     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KG2GN22GH5ACSHZDZS8748MW"
     )
 
     display_name: Optional[str] = None
@@ -2171,7 +2171,7 @@ class UpdateToolResponseBodyFunctionTool(BaseModel):
     function: UpdateToolResponseBodyFunction
 
     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KG2GN22F7DAZ551QPG1JRBHH"
    )
 
     display_name: Optional[str] = None
```