orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/agents.py +186 -186
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5746 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1945 -383
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1381 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +407 -128
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2759 -1251
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1696 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1679 -230
- orq_ai_sdk/models/getpromptversionop.py +1676 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +805 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1690 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1462 -196
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1439 -194
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1968 -397
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2854 -1448
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +364 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
- orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/models/retrieveagentrequestop.py

@@ -138,6 +138,32 @@ class RetrieveAgentRequestTools(BaseModel):
     timeout: Optional[float] = 120
     r"""Tool execution timeout in seconds (default: 2 minutes, max: 10 minutes)"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "key",
+                "display_name",
+                "description",
+                "requires_approval",
+                "tool_id",
+                "conditions",
+                "timeout",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestExecuteOn = Literal[
     "input",
@@ -165,6 +191,22 @@ class RetrieveAgentRequestEvaluators(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestAgentsExecuteOn = Literal[
     "input",
@@ -192,6 +234,22 @@ class RetrieveAgentRequestGuardrails(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RetrieveAgentRequestSettingsTypedDict(TypedDict):
     max_iterations: NotRequired[int]
@@ -227,6 +285,31 @@ class RetrieveAgentRequestSettings(BaseModel):
     guardrails: Optional[List[RetrieveAgentRequestGuardrails]] = None
     r"""Configuration for a guardrail applied to the agent"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "max_iterations",
+                "max_execution_time",
+                "tool_approval_required",
+                "tools",
+                "evaluators",
+                "guardrails",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestVoice = Literal[
     "alloy",
@@ -295,6 +378,22 @@ class RetrieveAgentRequestResponseFormatJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RetrieveAgentRequestResponseFormatAgentsJSONSchemaTypedDict(TypedDict):
     r"""
@@ -427,6 +526,22 @@ class RetrieveAgentRequestStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestThinkingTypedDict = TypeAliasType(
     "RetrieveAgentRequestThinkingTypedDict",
@@ -469,6 +584,22 @@ class RetrieveAgentRequestToolChoice2(BaseModel):
     type: Optional[RetrieveAgentRequestToolChoiceType] = None
     r"""The type of the tool. Currently, only function is supported."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestToolChoice1 = Literal[
     "none",
@@ -535,9 +666,159 @@ class RetrieveAgentRequestAgentsGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""


+class RetrieveAgentRequestFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class RetrieveAgentRequestFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class RetrieveAgentRequestAgentsRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class RetrieveAgentRequestAgentsRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RetrieveAgentRequestType = Literal["exact_match",]
+
+
+class RetrieveAgentRequestCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: RetrieveAgentRequestType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class RetrieveAgentRequestCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: RetrieveAgentRequestType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RetrieveAgentRequestLoadBalancerType = Literal["weight_based",]
+
+
+class RetrieveAgentRequestLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class RetrieveAgentRequestLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class RetrieveAgentRequestLoadBalancer1TypedDict(TypedDict):
+    type: RetrieveAgentRequestLoadBalancerType
+    models: List[RetrieveAgentRequestLoadBalancerModelsTypedDict]
+
+
+class RetrieveAgentRequestLoadBalancer1(BaseModel):
+    type: RetrieveAgentRequestLoadBalancerType
+
+    models: List[RetrieveAgentRequestLoadBalancerModels]
+
+
+RetrieveAgentRequestLoadBalancerTypedDict = RetrieveAgentRequestLoadBalancer1TypedDict
+r"""Load balancer configuration for the request."""
+
+
+RetrieveAgentRequestLoadBalancer = RetrieveAgentRequestLoadBalancer1
+r"""Load balancer configuration for the request."""
+
+
+class RetrieveAgentRequestTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class RetrieveAgentRequestTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 class RetrieveAgentRequestParametersTypedDict(TypedDict):
     r"""Model behavior parameters (snake_case) stored as part of the agent configuration. These become the default parameters used when the agent is executed. Commonly used: temperature (0-1, controls randomness), max_completion_tokens (response length), top_p (nucleus sampling). Advanced: frequency_penalty, presence_penalty, response_format (JSON/structured output), reasoning_effort (for o1/thinking models), seed (reproducibility), stop sequences. Model-specific support varies. Runtime parameters in agent execution requests can override these defaults."""

+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[RetrieveAgentRequestAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -592,11 +873,24 @@ class RetrieveAgentRequestParametersTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[RetrieveAgentRequestAgentsGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[RetrieveAgentRequestFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[RetrieveAgentRequestAgentsRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[RetrieveAgentRequestCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[RetrieveAgentRequestLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[RetrieveAgentRequestTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""


 class RetrieveAgentRequestParameters(BaseModel):
     r"""Model behavior parameters (snake_case) stored as part of the agent configuration. These become the default parameters used when the agent is executed. Commonly used: temperature (0-1, controls randomness), max_completion_tokens (response length), top_p (nucleus sampling). Advanced: frequency_penalty, presence_penalty, response_format (JSON/structured output), reasoning_effort (for o1/thinking models), seed (reproducibility), stop sequences. Model-specific support varies. Runtime parameters in agent execution requests can override these defaults."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[RetrieveAgentRequestAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -673,72 +967,92 @@ class RetrieveAgentRequestParameters(BaseModel):
     guardrails: Optional[List[RetrieveAgentRequestAgentsGuardrails]] = None
     r"""A list of guardrails to apply to the request."""

+    fallbacks: Optional[List[RetrieveAgentRequestFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[RetrieveAgentRequestAgentsRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[RetrieveAgentRequestCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[RetrieveAgentRequestLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[RetrieveAgentRequestTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = …
-        …
+        optional_fields = set(
+            [
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            …
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -761,6 +1075,22 @@ class RetrieveAgentRequestRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestFallbackModelConfigurationVoice = Literal[
     "alloy",
@@ -834,6 +1164,22 @@ class RetrieveAgentRequestResponseFormatAgentsResponseJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RetrieveAgentRequestResponseFormatAgentsResponse200JSONSchemaTypedDict(TypedDict):
     r"""
@@ -971,6 +1317,22 @@ class RetrieveAgentRequestFallbackModelConfigurationStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestFallbackModelConfigurationThinkingTypedDict = TypeAliasType(
     "RetrieveAgentRequestFallbackModelConfigurationThinkingTypedDict",
@@ -1013,6 +1375,22 @@ class RetrieveAgentRequestToolChoiceAgents2(BaseModel):
     type: Optional[RetrieveAgentRequestToolChoiceAgentsType] = None
     r"""The type of the tool. Currently, only function is supported."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestToolChoiceAgents1 = Literal[
     "none",
@@ -1084,9 +1462,163 @@ class RetrieveAgentRequestFallbackModelConfigurationGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""


+class RetrieveAgentRequestFallbackModelConfigurationFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class RetrieveAgentRequestFallbackModelConfigurationFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class RetrieveAgentRequestFallbackModelConfigurationAgentsRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class RetrieveAgentRequestFallbackModelConfigurationAgentsRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RetrieveAgentRequestFallbackModelConfigurationType = Literal["exact_match",]
+
+
+class RetrieveAgentRequestFallbackModelConfigurationCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: RetrieveAgentRequestFallbackModelConfigurationType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class RetrieveAgentRequestFallbackModelConfigurationCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: RetrieveAgentRequestFallbackModelConfigurationType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RetrieveAgentRequestLoadBalancerAgentsType = Literal["weight_based",]
+
+
+class RetrieveAgentRequestLoadBalancerAgentsModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class RetrieveAgentRequestLoadBalancerAgentsModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class RetrieveAgentRequestLoadBalancerAgents1TypedDict(TypedDict):
+    type: RetrieveAgentRequestLoadBalancerAgentsType
+    models: List[RetrieveAgentRequestLoadBalancerAgentsModelsTypedDict]
+
+
+class RetrieveAgentRequestLoadBalancerAgents1(BaseModel):
+    type: RetrieveAgentRequestLoadBalancerAgentsType
+
+    models: List[RetrieveAgentRequestLoadBalancerAgentsModels]
+
+
+RetrieveAgentRequestFallbackModelConfigurationLoadBalancerTypedDict = (
+    RetrieveAgentRequestLoadBalancerAgents1TypedDict
+)
+r"""Load balancer configuration for the request."""
+
+
+RetrieveAgentRequestFallbackModelConfigurationLoadBalancer = (
+    RetrieveAgentRequestLoadBalancerAgents1
+)
+r"""Load balancer configuration for the request."""
+
+
+class RetrieveAgentRequestFallbackModelConfigurationTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class RetrieveAgentRequestFallbackModelConfigurationTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 class RetrieveAgentRequestFallbackModelConfigurationParametersTypedDict(TypedDict):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""

+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[
         Nullable[RetrieveAgentRequestFallbackModelConfigurationAudioTypedDict]
     ]
@@ -1159,11 +1691,30 @@ class RetrieveAgentRequestFallbackModelConfigurationParametersTypedDict(TypedDic
         List[RetrieveAgentRequestFallbackModelConfigurationGuardrailsTypedDict]
     ]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[
+        List[RetrieveAgentRequestFallbackModelConfigurationFallbacksTypedDict]
+    ]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[
+        RetrieveAgentRequestFallbackModelConfigurationAgentsRetryTypedDict
+    ]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[RetrieveAgentRequestFallbackModelConfigurationCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[
+        RetrieveAgentRequestFallbackModelConfigurationLoadBalancerTypedDict
+    ]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[RetrieveAgentRequestFallbackModelConfigurationTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""


 class RetrieveAgentRequestFallbackModelConfigurationParameters(BaseModel):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[RetrieveAgentRequestFallbackModelConfigurationAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -1252,72 +1803,96 @@ class RetrieveAgentRequestFallbackModelConfigurationParameters(BaseModel):
     ] = None
     r"""A list of guardrails to apply to the request."""

+    fallbacks: Optional[
+        List[RetrieveAgentRequestFallbackModelConfigurationFallbacks]
+    ] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[RetrieveAgentRequestFallbackModelConfigurationAgentsRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[RetrieveAgentRequestFallbackModelConfigurationCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[
+        RetrieveAgentRequestFallbackModelConfigurationLoadBalancer
+    ] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[RetrieveAgentRequestFallbackModelConfigurationTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = …
-        …
+        optional_fields = set(
+            [
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            …
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -1340,6 +1915,22 @@ class RetrieveAgentRequestFallbackModelConfigurationRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RetrieveAgentRequestFallbackModelConfiguration2TypedDict(TypedDict):
     r"""Fallback model configuration with optional parameters and retry settings."""
@@ -1368,6 +1959,22 @@ class RetrieveAgentRequestFallbackModelConfiguration2(BaseModel):
     retry: Optional[RetrieveAgentRequestFallbackModelConfigurationRetry] = None
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters", "retry"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RetrieveAgentRequestFallbackModelConfigurationTypedDict = TypeAliasType(
     "RetrieveAgentRequestFallbackModelConfigurationTypedDict",
@@ -1418,31 +2025,28 @@ class RetrieveAgentRequestModel(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = …
-        …
+        optional_fields = set(
+            ["integration_id", "parameters", "retry", "fallback_models"]
+        )
+        nullable_fields = set(["integration_id", "fallback_models"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            …
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m

@@ -1461,6 +2065,22 @@ class RetrieveAgentRequestTeamOfAgents(BaseModel):
     role: Optional[str] = None
     r"""The role of the agent in this context. This is used to give extra information to the leader to help it decide which agent to hand off to."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["role"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RetrieveAgentRequestMetricsTypedDict(TypedDict):
     total_cost: NotRequired[float]
@@ -1469,6 +2089,22 @@ class RetrieveAgentRequestMetricsTypedDict(TypedDict):
 class RetrieveAgentRequestMetrics(BaseModel):
     total_cost: Optional[float] = 0

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["total_cost"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RetrieveAgentRequestKnowledgeBasesTypedDict(TypedDict):
     knowledge_id: str
@@ -1593,42 +2229,39 @@ class RetrieveAgentRequestResponseBody(BaseModel):

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = …
-        …
+        optional_fields = set(
+            [
+                "created_by_id",
+                "updated_by_id",
+                "created",
+                "updated",
+                "system_prompt",
+                "settings",
+                "version_hash",
+                "metrics",
+                "variables",
+                "knowledge_bases",
+                "source",
+            ]
+        )
+        nullable_fields = set(["created_by_id", "updated_by_id"])
         serialized = handler(self)
-
         m = {}

         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            …
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val

         return m