orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +332 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +258 -0
- orq_ai_sdk/embeddings.py +238 -0
- orq_ai_sdk/generations.py +272 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5341 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1922 -384
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1375 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +1890 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +384 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +504 -0
- orq_ai_sdk/models/createimageop.py +208 -117
- orq_ai_sdk/models/createimagevariationop.py +486 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2748 -1252
- orq_ai_sdk/models/creatererankop.py +416 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +316 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +562 -0
- orq_ai_sdk/models/createtranslationop.py +540 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1690 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1673 -230
- orq_ai_sdk/models/getpromptversionop.py +1670 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +793 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1684 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1451 -197
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1428 -195
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1951 -404
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2844 -1450
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +660 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +232 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +251 -0
- orq_ai_sdk/transcriptions.py +326 -0
- orq_ai_sdk/translations.py +298 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +254 -0
- orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
- orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/models/runagentop.py
CHANGED
@@ -102,6 +102,22 @@ class RunAgentResponseFormatAgentsJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class RunAgentResponseFormatJSONSchemaTypedDict(TypedDict):
     r"""
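Note: the `serialize_model` method added here is the recurring pattern in this release — a Pydantic wrap-mode serializer that drops optional fields the caller never set, so serialized request bodies omit defaults instead of sending explicit nulls. A minimal, self-contained sketch of the same idea (the generated code compares against the SDK's internal `UNSET_SENTINEL`; this sketch substitutes a plain `None` check, and `RetryConfig` is an illustrative model, not one from the SDK):

from typing import List, Optional

from pydantic import BaseModel, model_serializer


class RetryConfig(BaseModel):  # illustrative stand-in for the generated models
    count: Optional[float] = 3
    on_codes: Optional[List[float]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = {"count", "on_codes"}
        serialized = handler(self)  # default Pydantic serialization
        m = {}
        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # Keep required fields always; keep optional fields only when non-None.
            if val is not None or k not in optional_fields:
                m[k] = val
        return m


print(RetryConfig().model_dump())                # {'count': 3.0}
print(RetryConfig(on_codes=[429]).model_dump())  # {'count': 3.0, 'on_codes': [429.0]}

With this serializer in place, an unset `on_codes` is omitted from the payload rather than serialized as `null`.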
@@ -232,6 +248,22 @@ class RunAgentModelConfigurationStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentModelConfigurationThinkingTypedDict = TypeAliasType(
     "RunAgentModelConfigurationThinkingTypedDict",
@@ -274,6 +306,22 @@ class RunAgentToolChoice2(BaseModel):
     type: Optional[RunAgentToolChoiceType] = None
     r"""The type of the tool. Currently, only function is supported."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentToolChoice1 = Literal[
     "none",
@@ -340,6 +388,154 @@ class RunAgentModelConfigurationGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
+class RunAgentModelConfigurationFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class RunAgentModelConfigurationFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class RunAgentModelConfigurationRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class RunAgentModelConfigurationRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RunAgentModelConfigurationType = Literal["exact_match",]
+
+
+class RunAgentModelConfigurationCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: RunAgentModelConfigurationType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class RunAgentModelConfigurationCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: RunAgentModelConfigurationType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RunAgentLoadBalancerType = Literal["weight_based",]
+
+
+class RunAgentLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class RunAgentLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class RunAgentLoadBalancer1TypedDict(TypedDict):
+    type: RunAgentLoadBalancerType
+    models: List[RunAgentLoadBalancerModelsTypedDict]
+
+
+class RunAgentLoadBalancer1(BaseModel):
+    type: RunAgentLoadBalancerType
+
+    models: List[RunAgentLoadBalancerModels]
+
+
+RunAgentModelConfigurationLoadBalancerTypedDict = RunAgentLoadBalancer1TypedDict
+r"""Load balancer configuration for the request."""
+
+
+RunAgentModelConfigurationLoadBalancer = RunAgentLoadBalancer1
+r"""Load balancer configuration for the request."""
+
+
+class RunAgentModelConfigurationTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class RunAgentModelConfigurationTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 class RunAgentModelConfigurationParametersTypedDict(TypedDict):
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
 
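Note: the hunk above defines the new gateway routing knobs for the primary model configuration — per-request fallbacks, retry, cache, load_balancer, and timeout. A hedged sketch of constructing them follows; class names come from the diff above, the direct import path is inferred from the module path shown in this file's header, `openai/gpt-4o-mini` is a hypothetical fallback model ID, and the other model IDs come from this file's docstrings. These objects attach to the model parameters via the fields added in the next hunk.

# Sketch only: exercising the configuration models defined in the hunk above.
from orq_ai_sdk.models.runagentop import (
    RunAgentLoadBalancer1,
    RunAgentLoadBalancerModels,
    RunAgentModelConfigurationCache,
    RunAgentModelConfigurationFallbacks,
    RunAgentModelConfigurationRetry,
    RunAgentModelConfigurationTimeout,
)

# Retry up to 2 times on transient HTTP errors (count must be 1-5).
retry = RunAgentModelConfigurationRetry(count=2, on_codes=[429, 500, 503])

# Serve identical requests from cache for 10 minutes (ttl is seconds, max 259200).
cache = RunAgentModelConfigurationCache(type="exact_match", ttl=600)

# Weight-based load balancing across two models (weight defaults to 0.5).
load_balancer = RunAgentLoadBalancer1(
    type="weight_based",
    models=[
        RunAgentLoadBalancerModels(model="openai/gpt-4o", weight=0.7),
        RunAgentLoadBalancerModels(
            model="anthropic/claude-haiku-4-5-20251001", weight=0.3
        ),
    ],
)

# Give up on a call after 30 seconds (call_timeout is milliseconds).
timeout = RunAgentModelConfigurationTimeout(call_timeout=30_000)

# Models to try if the primary model fails; the ID here is hypothetical.
fallbacks = [RunAgentModelConfigurationFallbacks(model="openai/gpt-4o-mini")]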
@@ -399,6 +595,16 @@ class RunAgentModelConfigurationParametersTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[RunAgentModelConfigurationGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[RunAgentModelConfigurationFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[RunAgentModelConfigurationRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[RunAgentModelConfigurationCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[RunAgentModelConfigurationLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[RunAgentModelConfigurationTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
 class RunAgentModelConfigurationParameters(BaseModel):
@@ -480,77 +686,96 @@ class RunAgentModelConfigurationParameters(BaseModel):
     guardrails: Optional[List[RunAgentModelConfigurationGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[RunAgentModelConfigurationFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[RunAgentModelConfigurationRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[RunAgentModelConfigurationCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[RunAgentModelConfigurationLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[RunAgentModelConfigurationTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = …
+        optional_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            …
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class RunAgentModelConfigurationRetryTypedDict(TypedDict):
+class RunAgentModelConfigurationAgentsRetryTypedDict(TypedDict):
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
     count: NotRequired[float]
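Note: the rewritten `serialize_model` above adds one refinement over the simpler serializers in this file — for fields that are both optional and nullable, it consults `__pydantic_fields_set__` so that an explicitly passed `None` is serialized as `null`, while an untouched field is still omitted. A self-contained sketch of that behavior (simplified: the generated code also checks `UNSET_SENTINEL`, and `Params` is an illustrative model, not one from the SDK):

from typing import Optional

from pydantic import BaseModel, model_serializer


class Params(BaseModel):
    temperature: Optional[float] = None  # optional and nullable

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = {"temperature"}
        nullable_fields = {"temperature"}
        serialized = handler(self)
        m = {}
        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # True only when the caller passed this field explicitly.
            is_nullable_and_explicitly_set = (
                k in nullable_fields and n in self.__pydantic_fields_set__
            )
            if val is not None or k not in optional_fields or is_nullable_and_explicitly_set:
                m[k] = val
        return m


print(Params().model_dump())                  # {}  (unset -> omitted)
print(Params(temperature=None).model_dump())  # {'temperature': None}  (explicit null kept)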
@@ -559,7 +784,7 @@ class RunAgentModelConfigurationRetryTypedDict(TypedDict):
|
|
|
559
784
|
r"""HTTP status codes that trigger retry logic"""
|
|
560
785
|
|
|
561
786
|
|
|
562
|
-
class
|
|
787
|
+
class RunAgentModelConfigurationAgentsRetry(BaseModel):
|
|
563
788
|
r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
|
|
564
789
|
|
|
565
790
|
count: Optional[float] = 3
|
|
@@ -568,6 +793,22 @@ class RunAgentModelConfigurationRetry(BaseModel):
|
|
|
568
793
|
on_codes: Optional[List[float]] = None
|
|
569
794
|
r"""HTTP status codes that trigger retry logic"""
|
|
570
795
|
|
|
796
|
+
@model_serializer(mode="wrap")
|
|
797
|
+
def serialize_model(self, handler):
|
|
798
|
+
optional_fields = set(["count", "on_codes"])
|
|
799
|
+
serialized = handler(self)
|
|
800
|
+
m = {}
|
|
801
|
+
|
|
802
|
+
for n, f in type(self).model_fields.items():
|
|
803
|
+
k = f.alias or n
|
|
804
|
+
val = serialized.get(k)
|
|
805
|
+
|
|
806
|
+
if val != UNSET_SENTINEL:
|
|
807
|
+
if val is not None or k not in optional_fields:
|
|
808
|
+
m[k] = val
|
|
809
|
+
|
|
810
|
+
return m
|
|
811
|
+
|
|
571
812
|
|
|
572
813
|
class RunAgentModelConfiguration2TypedDict(TypedDict):
|
|
573
814
|
r"""
|
|
@@ -579,7 +820,7 @@ class RunAgentModelConfiguration2TypedDict(TypedDict):
|
|
|
579
820
|
r"""A model ID string (e.g., `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`). Only models that support tool calling can be used with agents."""
|
|
580
821
|
parameters: NotRequired[RunAgentModelConfigurationParametersTypedDict]
|
|
581
822
|
r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
|
|
582
|
-
retry: NotRequired[
|
|
823
|
+
retry: NotRequired[RunAgentModelConfigurationAgentsRetryTypedDict]
|
|
583
824
|
r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
|
|
584
825
|
|
|
585
826
|
|
|
@@ -595,9 +836,25 @@ class RunAgentModelConfiguration2(BaseModel):
|
|
|
595
836
|
parameters: Optional[RunAgentModelConfigurationParameters] = None
|
|
596
837
|
r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
|
|
597
838
|
|
|
598
|
-
retry: Optional[
|
|
839
|
+
retry: Optional[RunAgentModelConfigurationAgentsRetry] = None
|
|
599
840
|
r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
|
|
600
841
|
|
|
842
|
+
@model_serializer(mode="wrap")
|
|
843
|
+
def serialize_model(self, handler):
|
|
844
|
+
optional_fields = set(["parameters", "retry"])
|
|
845
|
+
serialized = handler(self)
|
|
846
|
+
m = {}
|
|
847
|
+
|
|
848
|
+
for n, f in type(self).model_fields.items():
|
|
849
|
+
k = f.alias or n
|
|
850
|
+
val = serialized.get(k)
|
|
851
|
+
|
|
852
|
+
if val != UNSET_SENTINEL:
|
|
853
|
+
if val is not None or k not in optional_fields:
|
|
854
|
+
m[k] = val
|
|
855
|
+
|
|
856
|
+
return m
|
|
857
|
+
|
|
601
858
|
|
|
602
859
|
RunAgentModelConfigurationTypedDict = TypeAliasType(
|
|
603
860
|
"RunAgentModelConfigurationTypedDict",
|
|
@@ -683,6 +940,22 @@ class RunAgentResponseFormatAgentsRequestRequestBodyJSONSchema(BaseModel):
|
|
|
683
940
|
strict: Optional[bool] = False
|
|
684
941
|
r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
|
|
685
942
|
|
|
943
|
+
@model_serializer(mode="wrap")
|
|
944
|
+
def serialize_model(self, handler):
|
|
945
|
+
optional_fields = set(["description", "schema", "strict"])
|
|
946
|
+
serialized = handler(self)
|
|
947
|
+
m = {}
|
|
948
|
+
|
|
949
|
+
for n, f in type(self).model_fields.items():
|
|
950
|
+
k = f.alias or n
|
|
951
|
+
val = serialized.get(k)
|
|
952
|
+
|
|
953
|
+
if val != UNSET_SENTINEL:
|
|
954
|
+
if val is not None or k not in optional_fields:
|
|
955
|
+
m[k] = val
|
|
956
|
+
|
|
957
|
+
return m
|
|
958
|
+
|
|
686
959
|
|
|
687
960
|
class RunAgentResponseFormatAgentsRequestJSONSchemaTypedDict(TypedDict):
|
|
688
961
|
r"""
|
|
@@ -815,6 +1088,22 @@ class RunAgentFallbackModelConfigurationStreamOptions(BaseModel):
|
|
|
815
1088
|
include_usage: Optional[bool] = None
|
|
816
1089
|
r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
|
|
817
1090
|
|
|
1091
|
+
@model_serializer(mode="wrap")
|
|
1092
|
+
def serialize_model(self, handler):
|
|
1093
|
+
optional_fields = set(["include_usage"])
|
|
1094
|
+
serialized = handler(self)
|
|
1095
|
+
m = {}
|
|
1096
|
+
|
|
1097
|
+
for n, f in type(self).model_fields.items():
|
|
1098
|
+
k = f.alias or n
|
|
1099
|
+
val = serialized.get(k)
|
|
1100
|
+
|
|
1101
|
+
if val != UNSET_SENTINEL:
|
|
1102
|
+
if val is not None or k not in optional_fields:
|
|
1103
|
+
m[k] = val
|
|
1104
|
+
|
|
1105
|
+
return m
|
|
1106
|
+
|
|
818
1107
|
|
|
819
1108
|
RunAgentFallbackModelConfigurationThinkingTypedDict = TypeAliasType(
|
|
820
1109
|
"RunAgentFallbackModelConfigurationThinkingTypedDict",
|
|
@@ -857,6 +1146,22 @@ class RunAgentToolChoiceAgents2(BaseModel):
|
|
|
857
1146
|
type: Optional[RunAgentToolChoiceAgentsType] = None
|
|
858
1147
|
r"""The type of the tool. Currently, only function is supported."""
|
|
859
1148
|
|
|
1149
|
+
@model_serializer(mode="wrap")
|
|
1150
|
+
def serialize_model(self, handler):
|
|
1151
|
+
optional_fields = set(["type"])
|
|
1152
|
+
serialized = handler(self)
|
|
1153
|
+
m = {}
|
|
1154
|
+
|
|
1155
|
+
for n, f in type(self).model_fields.items():
|
|
1156
|
+
k = f.alias or n
|
|
1157
|
+
val = serialized.get(k)
|
|
1158
|
+
|
|
1159
|
+
if val != UNSET_SENTINEL:
|
|
1160
|
+
if val is not None or k not in optional_fields:
|
|
1161
|
+
m[k] = val
|
|
1162
|
+
|
|
1163
|
+
return m
|
|
1164
|
+
|
|
860
1165
|
|
|
861
1166
|
RunAgentToolChoiceAgents1 = Literal[
|
|
862
1167
|
"none",
|
|
@@ -923,69 +1228,229 @@ class RunAgentFallbackModelConfigurationGuardrails(BaseModel):
|
|
|
923
1228
|
r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
|
|
924
1229
|
|
|
925
1230
|
|
|
926
|
-
class
|
|
927
|
-
|
|
1231
|
+
class RunAgentFallbackModelConfigurationFallbacksTypedDict(TypedDict):
|
|
1232
|
+
model: str
|
|
1233
|
+
r"""Fallback model identifier"""
|
|
928
1234
|
|
|
929
|
-
audio: NotRequired[Nullable[RunAgentFallbackModelConfigurationAudioTypedDict]]
|
|
930
|
-
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
931
|
-
frequency_penalty: NotRequired[Nullable[float]]
|
|
932
|
-
r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
|
|
933
|
-
max_tokens: NotRequired[Nullable[int]]
|
|
934
|
-
r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
|
|
935
1235
|
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
|
|
940
|
-
logprobs: NotRequired[Nullable[bool]]
|
|
941
|
-
r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
|
|
942
|
-
top_logprobs: NotRequired[Nullable[int]]
|
|
943
|
-
r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
|
|
944
|
-
n: NotRequired[Nullable[int]]
|
|
945
|
-
r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
|
|
946
|
-
presence_penalty: NotRequired[Nullable[float]]
|
|
947
|
-
r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
|
|
948
|
-
response_format: NotRequired[
|
|
949
|
-
RunAgentFallbackModelConfigurationResponseFormatTypedDict
|
|
950
|
-
]
|
|
951
|
-
r"""An object specifying the format that the model must output"""
|
|
952
|
-
reasoning_effort: NotRequired[RunAgentFallbackModelConfigurationReasoningEffort]
|
|
953
|
-
r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
|
|
1236
|
+
class RunAgentFallbackModelConfigurationFallbacks(BaseModel):
|
|
1237
|
+
model: str
|
|
1238
|
+
r"""Fallback model identifier"""
|
|
954
1239
|
|
|
955
|
-
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
|
|
956
|
-
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
|
|
957
|
-
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
958
|
-
- `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
959
1240
|
|
|
960
|
-
|
|
961
|
-
"""
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
r"""
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
1241
|
+
class RunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
|
|
1242
|
+
r"""Retry configuration for the request"""
|
|
1243
|
+
|
|
1244
|
+
count: NotRequired[float]
|
|
1245
|
+
r"""Number of retry attempts (1-5)"""
|
|
1246
|
+
on_codes: NotRequired[List[float]]
|
|
1247
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
1248
|
+
|
|
1249
|
+
|
|
1250
|
+
class RunAgentFallbackModelConfigurationRetry(BaseModel):
|
|
1251
|
+
r"""Retry configuration for the request"""
|
|
1252
|
+
|
|
1253
|
+
count: Optional[float] = 3
|
|
1254
|
+
r"""Number of retry attempts (1-5)"""
|
|
1255
|
+
|
|
1256
|
+
on_codes: Optional[List[float]] = None
|
|
1257
|
+
r"""HTTP status codes that trigger retry logic"""
|
|
1258
|
+
|
|
1259
|
+
@model_serializer(mode="wrap")
|
|
1260
|
+
def serialize_model(self, handler):
|
|
1261
|
+
optional_fields = set(["count", "on_codes"])
|
|
1262
|
+
serialized = handler(self)
|
|
1263
|
+
m = {}
|
|
1264
|
+
|
|
1265
|
+
for n, f in type(self).model_fields.items():
|
|
1266
|
+
k = f.alias or n
|
|
1267
|
+
val = serialized.get(k)
|
|
1268
|
+
|
|
1269
|
+
if val != UNSET_SENTINEL:
|
|
1270
|
+
if val is not None or k not in optional_fields:
|
|
1271
|
+
m[k] = val
|
|
1272
|
+
|
|
1273
|
+
return m
|
|
1274
|
+
|
|
1275
|
+
|
|
1276
|
+
RunAgentFallbackModelConfigurationType = Literal["exact_match",]
|
|
1277
|
+
|
|
1278
|
+
|
|
1279
|
+
class RunAgentFallbackModelConfigurationCacheTypedDict(TypedDict):
|
|
1280
|
+
r"""Cache configuration for the request."""
|
|
1281
|
+
|
|
1282
|
+
type: RunAgentFallbackModelConfigurationType
|
|
1283
|
+
ttl: NotRequired[float]
|
|
1284
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1285
|
+
|
|
1286
|
+
|
|
1287
|
+
class RunAgentFallbackModelConfigurationCache(BaseModel):
|
|
1288
|
+
r"""Cache configuration for the request."""
|
|
1289
|
+
|
|
1290
|
+
type: RunAgentFallbackModelConfigurationType
|
|
1291
|
+
|
|
1292
|
+
ttl: Optional[float] = 1800
|
|
1293
|
+
r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
|
|
1294
|
+
|
|
1295
|
+
@model_serializer(mode="wrap")
|
|
1296
|
+
def serialize_model(self, handler):
|
|
1297
|
+
optional_fields = set(["ttl"])
|
|
1298
|
+
serialized = handler(self)
|
|
1299
|
+
m = {}
|
|
1300
|
+
|
|
1301
|
+
for n, f in type(self).model_fields.items():
|
|
1302
|
+
k = f.alias or n
|
|
1303
|
+
val = serialized.get(k)
|
|
1304
|
+
|
|
1305
|
+
if val != UNSET_SENTINEL:
|
|
1306
|
+
if val is not None or k not in optional_fields:
|
|
1307
|
+
m[k] = val
|
|
1308
|
+
|
|
1309
|
+
return m
|
|
1310
|
+
|
|
1311
|
+
|
|
1312
|
+
RunAgentLoadBalancerAgentsType = Literal["weight_based",]
|
|
1313
|
+
|
|
1314
|
+
|
|
1315
|
+
class RunAgentLoadBalancerAgentsModelsTypedDict(TypedDict):
|
|
1316
|
+
model: str
|
|
1317
|
+
r"""Model identifier for load balancing"""
|
|
1318
|
+
weight: NotRequired[float]
|
|
1319
|
+
r"""Weight assigned to this model for load balancing"""
|
|
1320
|
+
|
|
1321
|
+
|
|
1322
|
+
class RunAgentLoadBalancerAgentsModels(BaseModel):
|
|
1323
|
+
model: str
|
|
1324
|
+
r"""Model identifier for load balancing"""
|
|
1325
|
+
|
|
1326
|
+
weight: Optional[float] = 0.5
|
|
1327
|
+
r"""Weight assigned to this model for load balancing"""
|
|
1328
|
+
|
|
1329
|
+
@model_serializer(mode="wrap")
|
|
1330
|
+
def serialize_model(self, handler):
|
|
1331
|
+
optional_fields = set(["weight"])
|
|
1332
|
+
serialized = handler(self)
|
|
1333
|
+
m = {}
|
|
1334
|
+
|
|
1335
|
+
for n, f in type(self).model_fields.items():
|
|
1336
|
+
k = f.alias or n
|
|
1337
|
+
val = serialized.get(k)
|
|
1338
|
+
|
|
1339
|
+
if val != UNSET_SENTINEL:
|
|
1340
|
+
if val is not None or k not in optional_fields:
|
|
1341
|
+
m[k] = val
|
|
1342
|
+
|
|
1343
|
+
return m
|
|
1344
|
+
|
|
1345
|
+
|
|
1346
|
+
class RunAgentLoadBalancerAgents1TypedDict(TypedDict):
|
|
1347
|
+
type: RunAgentLoadBalancerAgentsType
|
|
1348
|
+
models: List[RunAgentLoadBalancerAgentsModelsTypedDict]
|
|
1349
|
+
|
|
1350
|
+
|
|
1351
|
+
class RunAgentLoadBalancerAgents1(BaseModel):
|
|
1352
|
+
type: RunAgentLoadBalancerAgentsType
|
|
1353
|
+
|
|
1354
|
+
models: List[RunAgentLoadBalancerAgentsModels]
|
|
1355
|
+
|
|
1356
|
+
|
|
1357
|
+
RunAgentFallbackModelConfigurationLoadBalancerTypedDict = (
|
|
1358
|
+
RunAgentLoadBalancerAgents1TypedDict
|
|
1359
|
+
)
|
|
1360
|
+
r"""Load balancer configuration for the request."""
|
|
1361
|
+
|
|
1362
|
+
|
|
1363
|
+
RunAgentFallbackModelConfigurationLoadBalancer = RunAgentLoadBalancerAgents1
|
|
1364
|
+
r"""Load balancer configuration for the request."""
|
|
1365
|
+
|
|
1366
|
+
|
|
1367
|
+
class RunAgentFallbackModelConfigurationTimeoutTypedDict(TypedDict):
|
|
1368
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
1369
|
+
|
|
1370
|
+
call_timeout: float
|
|
1371
|
+
r"""Timeout value in milliseconds"""
|
|
1372
|
+
|
|
1373
|
+
|
|
1374
|
+
class RunAgentFallbackModelConfigurationTimeout(BaseModel):
|
|
1375
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
1376
|
+
|
|
1377
|
+
call_timeout: float
|
|
1378
|
+
r"""Timeout value in milliseconds"""
|
|
1379
|
+
|
|
1380
|
+
|
|
1381
|
+
class RunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
|
|
1382
|
+
r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
|
|
1383
|
+
|
|
1384
|
+
audio: NotRequired[Nullable[RunAgentFallbackModelConfigurationAudioTypedDict]]
|
|
1385
|
+
r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
|
|
1386
|
+
frequency_penalty: NotRequired[Nullable[float]]
|
|
1387
|
+
r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
|
|
1388
|
+
max_tokens: NotRequired[Nullable[int]]
|
|
1389
|
+
r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
|
|
1390
|
+
|
|
1391
|
+
This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
|
|
1392
|
+
"""
|
|
1393
|
+
max_completion_tokens: NotRequired[Nullable[int]]
|
|
1394
|
+
r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
|
|
1395
|
+
logprobs: NotRequired[Nullable[bool]]
|
|
1396
|
+
r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
|
|
1397
|
+
top_logprobs: NotRequired[Nullable[int]]
|
|
1398
|
+
r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
|
|
1399
|
+
n: NotRequired[Nullable[int]]
|
|
1400
|
+
r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
|
|
1401
|
+
presence_penalty: NotRequired[Nullable[float]]
|
|
1402
|
+
r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
|
|
1403
|
+
response_format: NotRequired[
|
|
1404
|
+
RunAgentFallbackModelConfigurationResponseFormatTypedDict
|
|
1405
|
+
]
|
|
1406
|
+
r"""An object specifying the format that the model must output"""
|
|
1407
|
+
reasoning_effort: NotRequired[RunAgentFallbackModelConfigurationReasoningEffort]
|
|
1408
|
+
r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
|
|
1409
|
+
|
|
1410
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
|
|
1411
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
|
|
1412
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1413
|
+
- `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1414
|
+
|
|
1415
|
+
Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
|
|
1416
|
+
"""
|
|
1417
|
+
verbosity: NotRequired[str]
|
|
1418
|
+
r"""Adjusts response verbosity. Lower levels yield shorter answers."""
|
|
1419
|
+
seed: NotRequired[Nullable[float]]
|
|
1420
|
+
r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
|
|
1421
|
+
stop: NotRequired[Nullable[RunAgentFallbackModelConfigurationStopTypedDict]]
|
|
1422
|
+
r"""Up to 4 sequences where the API will stop generating further tokens."""
|
|
1423
|
+
stream_options: NotRequired[
|
|
1424
|
+
Nullable[RunAgentFallbackModelConfigurationStreamOptionsTypedDict]
|
|
1425
|
+
]
|
|
1426
|
+
r"""Options for streaming response. Only set this when you set stream: true."""
|
|
1427
|
+
thinking: NotRequired[RunAgentFallbackModelConfigurationThinkingTypedDict]
|
|
1428
|
+
temperature: NotRequired[Nullable[float]]
|
|
1429
|
+
r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
|
|
1430
|
+
top_p: NotRequired[Nullable[float]]
|
|
1431
|
+
r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
|
|
1432
|
+
top_k: NotRequired[Nullable[float]]
|
|
1433
|
+
r"""Limits the model to consider only the top k most likely tokens at each step."""
|
|
1434
|
+
tool_choice: NotRequired[RunAgentFallbackModelConfigurationToolChoiceTypedDict]
|
|
1435
|
+
r"""Controls which (if any) tool is called by the model."""
|
|
1436
|
+
parallel_tool_calls: NotRequired[bool]
|
|
1437
|
+
r"""Whether to enable parallel function calling during tool use."""
|
|
1438
|
+
modalities: NotRequired[
|
|
1439
|
+
Nullable[List[RunAgentFallbackModelConfigurationModalities]]
|
|
1440
|
+
]
|
|
1441
|
+
r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
|
|
987
1442
|
guardrails: NotRequired[List[RunAgentFallbackModelConfigurationGuardrailsTypedDict]]
|
|
988
1443
|
r"""A list of guardrails to apply to the request."""
|
|
1444
|
+
fallbacks: NotRequired[List[RunAgentFallbackModelConfigurationFallbacksTypedDict]]
|
|
1445
|
+
r"""Array of fallback models to use if primary model fails"""
|
|
1446
|
+
retry: NotRequired[RunAgentFallbackModelConfigurationRetryTypedDict]
|
|
1447
|
+
r"""Retry configuration for the request"""
|
|
1448
|
+
cache: NotRequired[RunAgentFallbackModelConfigurationCacheTypedDict]
|
|
1449
|
+
r"""Cache configuration for the request."""
|
|
1450
|
+
load_balancer: NotRequired[RunAgentFallbackModelConfigurationLoadBalancerTypedDict]
|
|
1451
|
+
r"""Load balancer configuration for the request."""
|
|
1452
|
+
timeout: NotRequired[RunAgentFallbackModelConfigurationTimeoutTypedDict]
|
|
1453
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
989
1454
|
|
|
990
1455
|
|
|
991
1456
|
class RunAgentFallbackModelConfigurationParameters(BaseModel):
|
|
@@ -1071,77 +1536,96 @@ class RunAgentFallbackModelConfigurationParameters(BaseModel):
|
|
|
1071
1536
|
guardrails: Optional[List[RunAgentFallbackModelConfigurationGuardrails]] = None
|
|
1072
1537
|
r"""A list of guardrails to apply to the request."""
|
|
1073
1538
|
|
|
1539
|
+
fallbacks: Optional[List[RunAgentFallbackModelConfigurationFallbacks]] = None
|
|
1540
|
+
r"""Array of fallback models to use if primary model fails"""
|
|
1541
|
+
|
|
1542
|
+
retry: Optional[RunAgentFallbackModelConfigurationRetry] = None
|
|
1543
|
+
r"""Retry configuration for the request"""
|
|
1544
|
+
|
|
1545
|
+
cache: Optional[RunAgentFallbackModelConfigurationCache] = None
|
|
1546
|
+
r"""Cache configuration for the request."""
|
|
1547
|
+
|
|
1548
|
+
load_balancer: Optional[RunAgentFallbackModelConfigurationLoadBalancer] = None
|
|
1549
|
+
r"""Load balancer configuration for the request."""
|
|
1550
|
+
|
|
1551
|
+
timeout: Optional[RunAgentFallbackModelConfigurationTimeout] = None
|
|
1552
|
+
r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
|
|
1553
|
+
|
|
1074
1554
|
@model_serializer(mode="wrap")
|
|
1075
1555
|
def serialize_model(self, handler):
|
|
1076
|
-
optional_fields =
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
|
|
1115
|
-
|
|
1116
|
-
|
|
1117
|
-
|
|
1118
|
-
|
|
1556
|
+
optional_fields = set(
|
|
1557
|
+
[
|
|
1558
|
+
"audio",
|
|
1559
|
+
"frequency_penalty",
|
|
1560
|
+
"max_tokens",
|
|
1561
|
+
"max_completion_tokens",
|
|
1562
|
+
"logprobs",
|
|
1563
|
+
"top_logprobs",
|
|
1564
|
+
"n",
|
|
1565
|
+
"presence_penalty",
|
|
1566
|
+
"response_format",
|
|
1567
|
+
"reasoning_effort",
|
|
1568
|
+
"verbosity",
|
|
1569
|
+
"seed",
|
|
1570
|
+
"stop",
|
|
1571
|
+
"stream_options",
|
|
1572
|
+
"thinking",
|
|
1573
|
+
"temperature",
|
|
1574
|
+
"top_p",
|
|
1575
|
+
"top_k",
|
|
1576
|
+
"tool_choice",
|
|
1577
|
+
"parallel_tool_calls",
|
|
1578
|
+
"modalities",
|
|
1579
|
+
"guardrails",
|
|
1580
|
+
"fallbacks",
|
|
1581
|
+
"retry",
|
|
1582
|
+
"cache",
|
|
1583
|
+
"load_balancer",
|
|
1584
|
+
"timeout",
|
|
1585
|
+
]
|
|
1586
|
+
)
|
|
1587
|
+
nullable_fields = set(
|
|
1588
|
+
[
|
|
1589
|
+
"audio",
|
|
1590
|
+
"frequency_penalty",
|
|
1591
|
+
"max_tokens",
|
|
1592
|
+
"max_completion_tokens",
|
|
1593
|
+
"logprobs",
|
|
1594
|
+
"top_logprobs",
|
|
1595
|
+
"n",
|
|
1596
|
+
"presence_penalty",
|
|
1597
|
+
"seed",
|
|
1598
|
+
"stop",
|
|
1599
|
+
"stream_options",
|
|
1600
|
+
"temperature",
|
|
1601
|
+
"top_p",
|
|
1602
|
+
"top_k",
|
|
1603
|
+
"modalities",
|
|
1604
|
+
]
|
|
1605
|
+
)
|
|
1119
1606
|
serialized = handler(self)
|
|
1120
|
-
|
|
1121
1607
|
m = {}
|
|
1122
1608
|
|
|
1123
1609
|
for n, f in type(self).model_fields.items():
|
|
1124
1610
|
k = f.alias or n
|
|
1125
1611
|
val = serialized.get(k)
|
|
1126
|
-
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
):
|
|
1139
|
-
m[k] = val
|
|
1612
|
+
is_nullable_and_explicitly_set = (
|
|
1613
|
+
k in nullable_fields
|
|
1614
|
+
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
|
|
1615
|
+
)
|
|
1616
|
+
|
|
1617
|
+
if val != UNSET_SENTINEL:
|
|
1618
|
+
if (
|
|
1619
|
+
val is not None
|
|
1620
|
+
or k not in optional_fields
|
|
1621
|
+
or is_nullable_and_explicitly_set
|
|
1622
|
+
):
|
|
1623
|
+
m[k] = val
|
|
1140
1624
|
|
|
1141
1625
|
return m
|
|
1142
1626
|
|
|
1143
1627
|
|
|
1144
|
-
class
|
|
1628
|
+
class RunAgentFallbackModelConfigurationAgentsRetryTypedDict(TypedDict):
|
|
1145
1629
|
r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
|
|
1146
1630
|
|
|
1147
1631
|
count: NotRequired[float]
|
|
@@ -1150,7 +1634,7 @@ class RunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
|
|
|
1150
1634
|
r"""HTTP status codes that trigger retry logic"""
|
|
1151
1635
|
|
|
1152
1636
|
|
|
1153
|
-
class
|
|
1637
|
+
class RunAgentFallbackModelConfigurationAgentsRetry(BaseModel):
|
|
1154
1638
|
r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
|
|
1155
1639
|
|
|
1156
1640
|
count: Optional[float] = 3
|
|
@@ -1159,6 +1643,22 @@ class RunAgentFallbackModelConfigurationRetry(BaseModel):
|
|
|
1159
1643
|
on_codes: Optional[List[float]] = None
|
|
1160
1644
|
r"""HTTP status codes that trigger retry logic"""
|
|
1161
1645
|
|
|
1646
|
+
@model_serializer(mode="wrap")
|
|
1647
|
+
def serialize_model(self, handler):
|
|
1648
|
+
optional_fields = set(["count", "on_codes"])
|
|
1649
|
+
serialized = handler(self)
|
|
1650
|
+
m = {}
|
|
1651
|
+
|
|
1652
|
+
for n, f in type(self).model_fields.items():
|
|
1653
|
+
k = f.alias or n
|
|
1654
|
+
val = serialized.get(k)
|
|
1655
|
+
|
|
1656
|
+
if val != UNSET_SENTINEL:
|
|
1657
|
+
if val is not None or k not in optional_fields:
|
|
1658
|
+
m[k] = val
|
|
1659
|
+
|
|
1660
|
+
return m
|
|
1661
|
+
|
|
1162
1662
|
|
|
1163
1663
|
class RunAgentFallbackModelConfiguration2TypedDict(TypedDict):
|
|
1164
1664
|
r"""Fallback model configuration with optional parameters and retry settings."""
|
|
@@ -1167,7 +1667,7 @@ class RunAgentFallbackModelConfiguration2TypedDict(TypedDict):
|
|
|
1167
1667
|
r"""A fallback model ID string. Must support tool calling."""
|
|
1168
1668
|
parameters: NotRequired[RunAgentFallbackModelConfigurationParametersTypedDict]
|
|
1169
1669
|
r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
|
|
1170
|
-
retry: NotRequired[
|
|
1670
|
+
retry: NotRequired[RunAgentFallbackModelConfigurationAgentsRetryTypedDict]
|
|
1171
1671
|
r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
|
|
1172
1672
|
|
|
1173
1673
|
|
|
@@ -1180,9 +1680,25 @@ class RunAgentFallbackModelConfiguration2(BaseModel):
|
|
|
1180
1680
|
parameters: Optional[RunAgentFallbackModelConfigurationParameters] = None
|
|
1181
1681
|
r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
|
|
1182
1682
|
|
|
1183
|
-
retry: Optional[
|
|
1683
|
+
retry: Optional[RunAgentFallbackModelConfigurationAgentsRetry] = None
|
|
1184
1684
|
r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
|
|
1185
1685
|
|
|
1686
|
+
@model_serializer(mode="wrap")
|
|
1687
|
+
def serialize_model(self, handler):
|
|
1688
|
+
optional_fields = set(["parameters", "retry"])
|
|
1689
|
+
serialized = handler(self)
|
|
1690
|
+
m = {}
|
|
1691
|
+
|
|
1692
|
+
for n, f in type(self).model_fields.items():
|
|
1693
|
+
k = f.alias or n
|
|
1694
|
+
val = serialized.get(k)
|
|
1695
|
+
|
|
1696
|
+
if val != UNSET_SENTINEL:
|
|
1697
|
+
if val is not None or k not in optional_fields:
|
|
1698
|
+
m[k] = val
|
|
1699
|
+
|
|
1700
|
+
return m
|
|
1701
|
+
|
|
1186
1702
|
|
|
1187
1703
|
RunAgentFallbackModelConfigurationTypedDict = TypeAliasType(
|
|
1188
1704
|
"RunAgentFallbackModelConfigurationTypedDict",
|
|
@@ -1265,6 +1781,22 @@ class RunAgentA2AMessage(BaseModel):
|
|
|
1265
1781
|
message_id: Annotated[Optional[str], pydantic.Field(alias="messageId")] = None
|
|
1266
1782
|
r"""Optional A2A message ID in ULID format"""
|
|
1267
1783
|
|
|
1784
|
+
@model_serializer(mode="wrap")
|
|
1785
|
+
def serialize_model(self, handler):
|
|
1786
|
+
optional_fields = set(["messageId"])
|
|
1787
|
+
serialized = handler(self)
|
|
1788
|
+
m = {}
|
|
1789
|
+
|
|
1790
|
+
for n, f in type(self).model_fields.items():
|
|
1791
|
+
k = f.alias or n
|
|
1792
|
+
val = serialized.get(k)
|
|
1793
|
+
|
|
1794
|
+
if val != UNSET_SENTINEL:
|
|
1795
|
+
if val is not None or k not in optional_fields:
|
|
1796
|
+
m[k] = val
|
|
1797
|
+
|
|
1798
|
+
return m
|
|
1799
|
+
|
|
1268
1800
|
|
|
1269
1801
|
class RunAgentIdentityTypedDict(TypedDict):
|
|
1270
1802
|
r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
|
|
@@ -1304,6 +1836,22 @@ class RunAgentIdentity(BaseModel):
|
|
|
1304
1836
|
tags: Optional[List[str]] = None
|
|
1305
1837
|
r"""A list of tags associated with the contact"""
|
|
1306
1838
|
|
|
1839
|
+
@model_serializer(mode="wrap")
|
|
1840
|
+
def serialize_model(self, handler):
|
|
1841
|
+
optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
|
|
1842
|
+
serialized = handler(self)
|
|
1843
|
+
m = {}
|
|
1844
|
+
|
|
1845
|
+
for n, f in type(self).model_fields.items():
|
|
1846
|
+
k = f.alias or n
|
|
1847
|
+
val = serialized.get(k)
|
|
1848
|
+
|
|
1849
|
+
if val != UNSET_SENTINEL:
|
|
1850
|
+
if val is not None or k not in optional_fields:
|
|
1851
|
+
m[k] = val
|
|
1852
|
+
|
|
1853
|
+
return m
|
|
1854
|
+
|
|
1307
1855
|
|
|
1308
1856
|
@deprecated(
|
|
1309
1857
|
"warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
|
|
@@ -1349,6 +1897,22 @@ class RunAgentContact(BaseModel):
     tags: Optional[List[str]] = None
     r"""A list of tags associated with the contact"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RunAgentThreadTypedDict(TypedDict):
     r"""Thread information to group related requests"""
@@ -1368,6 +1932,22 @@ class RunAgentThread(BaseModel):
     tags: Optional[List[str]] = None
     r"""Optional tags to differentiate or categorize threads"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RunAgentMemoryTypedDict(TypedDict):
     r"""Memory configuration for the agent execution. Used to associate memory stores with specific entities like users or sessions."""
@@ -1407,8 +1987,24 @@ class RunAgentTeamOfAgents(BaseModel):
     role: Optional[str] = None
     r"""The role of the agent in this context. This is used to give extra information to the leader to help it decide which agent to hand off to."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["role"])
+        serialized = handler(self)
+        m = {}

-
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type = Literal["mcp",]


 class AgentToolInputRunHeadersTypedDict(TypedDict):
@@ -1421,29 +2017,61 @@ class AgentToolInputRunHeaders(BaseModel):

     encrypted: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["encrypted"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

-
+RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType = Literal[
     "object",
 ]


-class
-    type:
+class AgentToolInputRunSchemaTypedDict(TypedDict):
+    type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType
     properties: NotRequired[Dict[str, Any]]
     required: NotRequired[List[str]]


-class
-    type:
+class AgentToolInputRunSchema(BaseModel):
+    type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType

     properties: Optional[Dict[str, Any]] = None

     required: Optional[List[str]] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["properties", "required"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RunAgentAgentToolInputRunToolsTypedDict(TypedDict):
     name: str
-    schema_:
+    schema_: AgentToolInputRunSchemaTypedDict
     id: NotRequired[str]
     description: NotRequired[str]

@@ -1451,12 +2079,28 @@ class RunAgentAgentToolInputRunToolsTypedDict(TypedDict):
 class RunAgentAgentToolInputRunTools(BaseModel):
     name: str

-    schema_: Annotated[
+    schema_: Annotated[AgentToolInputRunSchema, pydantic.Field(alias="schema")]

-    id: Optional[str] = "
+    id: Optional[str] = "01KFTTTQY54TZ6RWFVWQEB2HHW"

     description: Optional[str] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "description"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 ConnectionType = Literal[
     "http",
@@ -1489,11 +2133,27 @@ class Mcp(BaseModel):
     headers: Optional[Dict[str, AgentToolInputRunHeaders]] = None
     r"""HTTP headers for MCP server requests with encryption support"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["headers"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class MCPToolRunTypedDict(TypedDict):
     r"""MCP tool with inline definition for on-the-fly creation in run endpoint"""

-    type:
+    type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type
     key: str
     r"""Unique key of the tool as it will be displayed in the UI"""
     description: str
@@ -1507,7 +2167,7 @@ class MCPToolRunTypedDict(TypedDict):
 class MCPToolRun(BaseModel):
     r"""MCP tool with inline definition for on-the-fly creation in run endpoint"""

-    type:
+    type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type

     key: str
     r"""Unique key of the tool as it will be displayed in the UI"""
@@ -1523,6 +2183,155 @@ class MCPToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type = Literal[
+    "json_schema",
+]
+
+
+class SchemaTypedDict(TypedDict):
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    type: str
+    r"""The JSON Schema type"""
+    properties: Dict[str, Any]
+    r"""The properties of the JSON Schema object"""
+    required: List[str]
+    r"""Array of required property names"""
+
+
+class Schema(BaseModel):
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    model_config = ConfigDict(
+        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
+    )
+    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
+
+    type: str
+    r"""The JSON Schema type"""
+
+    properties: Dict[str, Any]
+    r"""The properties of the JSON Schema object"""
+
+    required: List[str]
+    r"""Array of required property names"""
+
+    @property
+    def additional_properties(self):
+        return self.__pydantic_extra__
+
+    @additional_properties.setter
+    def additional_properties(self, value):
+        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class AgentToolInputRunJSONSchemaTypedDict(TypedDict):
+    name: str
+    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
+    description: str
+    r"""A description of what the response format is for. This will be shown to the user."""
+    schema_: SchemaTypedDict
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+    strict: NotRequired[bool]
+    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. Only compatible with `OpenAI` models."""
+
+
+class AgentToolInputRunJSONSchema(BaseModel):
+    name: str
+    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
+
+    description: str
+    r"""A description of what the response format is for. This will be shown to the user."""
+
+    schema_: Annotated[Schema, pydantic.Field(alias="schema")]
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    strict: Optional[bool] = None
+    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. Only compatible with `OpenAI` models."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class JSONSchemaToolRunTypedDict(TypedDict):
+    r"""JSON Schema tool with inline definition for on-the-fly creation in run endpoint"""
+
+    type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+    key: str
+    r"""Unique key of the tool as it will be displayed in the UI"""
+    description: str
+    r"""A description of the tool, used by the model to choose when and how to call the tool. We do recommend using the `description` field as accurate as possible to give enough context to the model to make the right decision."""
+    json_schema: AgentToolInputRunJSONSchemaTypedDict
+    id: NotRequired[str]
+    display_name: NotRequired[str]
+    requires_approval: NotRequired[bool]
+
+
+class JSONSchemaToolRun(BaseModel):
+    r"""JSON Schema tool with inline definition for on-the-fly creation in run endpoint"""
+
+    type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+
+    key: str
+    r"""Unique key of the tool as it will be displayed in the UI"""
+
+    description: str
+    r"""A description of the tool, used by the model to choose when and how to call the tool. We do recommend using the `description` field as accurate as possible to give enough context to the model to make the right decision."""
+
+    json_schema: AgentToolInputRunJSONSchema
+
+    id: Annotated[Optional[str], pydantic.Field(alias="_id")] = None
+
+    display_name: Optional[str] = None
+
+    requires_approval: Optional[bool] = False
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools14Type = Literal[
     "function",
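The new Schema model above combines extra="allow" with an exposed __pydantic_extra__, so JSON Schema keywords beyond type/properties/required survive validation instead of being stripped. A reduced sketch of that mechanism under the same config; OpenSchema is an illustrative name, not an SDK class.

    from typing import Any, Dict
    from pydantic import BaseModel, ConfigDict

    class OpenSchema(BaseModel):
        model_config = ConfigDict(populate_by_name=True, extra="allow")

        type: str
        properties: Dict[str, Any]

    s = OpenSchema(type="object", properties={}, minLength=3)
    print(s.__pydantic_extra__)  # {'minLength': 3} -- unknown keys are kept
    print(s.model_dump())        # extras are included in the dump as well

The additional_properties property/setter pair in the diff simply gives that extras dict a public name.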
@@ -1596,6 +2405,22 @@ class AgentToolInputRunFunction(BaseModel):
     parameters: Optional[RunAgentAgentToolInputRunParameters] = None
     r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict", "parameters"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class FunctionToolRunTypedDict(TypedDict):
     r"""Function tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1628,6 +2453,24 @@ class FunctionToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            ["_id", "display_name", "description", "requires_approval"]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools13Type = Literal["code",]

@@ -1695,6 +2538,22 @@ class CodeTool(BaseModel):
     parameters: Optional[AgentToolInputRunParameters] = None
     r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class CodeToolRunTypedDict(TypedDict):
     r"""Code execution tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1729,6 +2588,22 @@ class CodeToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools12Type = Literal["http",]

@@ -1747,10 +2622,26 @@ class Headers2TypedDict(TypedDict):
     encrypted: NotRequired[bool]


-class Headers2(BaseModel):
-    value: str
+class Headers2(BaseModel):
+    value: str
+
+    encrypted: Optional[bool] = False
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["encrypted"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val

-
+        return m


 HeadersTypedDict = TypeAliasType("HeadersTypedDict", Union[Headers2TypedDict, str])
@@ -1787,6 +2678,22 @@ class Blueprint(BaseModel):
     body: Optional[Dict[str, Any]] = None
     r"""The body to send with the request."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["headers", "body"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools12HTTPType = Literal[
     "string",
@@ -1828,6 +2735,22 @@ class Arguments(BaseModel):
     default_value: Optional[DefaultValue] = None
     r"""The default value of the argument."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["send_to_model", "default_value"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class HTTPTypedDict(TypedDict):
     blueprint: BlueprintTypedDict
@@ -1843,6 +2766,22 @@ class HTTP(BaseModel):
     arguments: Optional[Dict[str, Arguments]] = None
     r"""The arguments to send with the request. The keys will be used to replace the placeholders in the `blueprint` field."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class HTTPToolRunTypedDict(TypedDict):
     r"""HTTP tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1877,6 +2816,22 @@ class HTTPToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools11Type = Literal[
     "current_date",
@@ -1899,6 +2854,22 @@ class AgentToolInputRunCurrentDateTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools10Type = Literal[
     "query_knowledge_base",
@@ -1921,6 +2892,22 @@ class AgentToolInputRunQueryKnowledgeBaseTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools9Type = Literal[
     "retrieve_knowledge_bases",
@@ -1943,6 +2930,22 @@ class AgentToolInputRunRetrieveKnowledgeBasesTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools8Type = Literal[
     "delete_memory_document",
@@ -1965,6 +2968,22 @@ class AgentToolInputRunDeleteMemoryDocumentTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsToolsType = Literal[
     "retrieve_memory_stores",
@@ -1987,6 +3006,22 @@ class AgentToolInputRunRetrieveMemoryStoresTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsType = Literal[
     "write_memory_store",
@@ -2009,6 +3044,22 @@ class AgentToolInputRunWriteMemoryStoreTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestRequestBodyType = Literal["query_memory_store",]

@@ -2029,6 +3080,22 @@ class AgentToolInputRunQueryMemoryStoreTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsRequestType = Literal["retrieve_agents",]

@@ -2049,6 +3116,22 @@ class AgentToolInputRunRetrieveAgentsTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunAgentsType = Literal["call_sub_agent",]

@@ -2069,6 +3152,22 @@ class AgentToolInputRunCallSubAgentTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentToolInputRunType = Literal["web_scraper",]

@@ -2089,6 +3188,22 @@ class AgentToolInputRunWebScraperTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 AgentToolInputRunType = Literal["google_search",]

@@ -2109,6 +3224,22 @@ class AgentToolInputRunGoogleSearchTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 AgentToolInputRunTypedDict = TypeAliasType(
     "AgentToolInputRunTypedDict",
@@ -2127,10 +3258,11 @@ AgentToolInputRunTypedDict = TypeAliasType(
         HTTPToolRunTypedDict,
         CodeToolRunTypedDict,
         FunctionToolRunTypedDict,
+        JSONSchemaToolRunTypedDict,
         MCPToolRunTypedDict,
     ],
 )
-r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, MCP) support full inline definitions for on-the-fly creation."""
+r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, JSON Schema, MCP) support full inline definitions for on-the-fly creation."""


 AgentToolInputRun = Annotated[
@@ -2155,11 +3287,12 @@ AgentToolInputRun = Annotated[
         Annotated[HTTPToolRun, Tag("http")],
         Annotated[CodeToolRun, Tag("code")],
         Annotated[FunctionToolRun, Tag("function")],
+        Annotated[JSONSchemaToolRun, Tag("json_schema")],
         Annotated[MCPToolRun, Tag("mcp")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
-r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, MCP) support full inline definitions for on-the-fly creation."""
+r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, JSON Schema, MCP) support full inline definitions for on-the-fly creation."""


 RunAgentToolApprovalRequired = Literal[
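The union above dispatches on the "type" field via pydantic's callable Discriminator plus per-member Tag annotations, which is why registering JSONSchemaToolRun under Tag("json_schema") is all the routing the new tool needs. A reduced sketch of the same dispatch; Cat, Dog, and pick_type are illustrative stand-ins for the SDK's models and its get_discriminator helper.

    from typing import Annotated, Union
    from pydantic import BaseModel, Discriminator, Tag, TypeAdapter

    class Cat(BaseModel):
        type: str
        meows: int

    class Dog(BaseModel):
        type: str
        barks: int

    def pick_type(v):
        # mirror get_discriminator: read "type" from a dict or an object
        return v.get("type") if isinstance(v, dict) else getattr(v, "type", None)

    Pet = Annotated[
        Union[Annotated[Cat, Tag("cat")], Annotated[Dog, Tag("dog")]],
        Discriminator(pick_type),
    ]

    pet = TypeAdapter(Pet).validate_python({"type": "cat", "meows": 2})
    assert isinstance(pet, Cat)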
@@ -2196,6 +3329,22 @@ class RunAgentEvaluators(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentAgentsExecuteOn = Literal[
     "input",
@@ -2223,6 +3372,22 @@ class RunAgentGuardrails(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RunAgentSettingsTypedDict(TypedDict):
     tools: NotRequired[List[AgentToolInputRunTypedDict]]
@@ -2258,6 +3423,31 @@ class RunAgentSettings(BaseModel):
     guardrails: Optional[List[RunAgentGuardrails]] = None
     r"""Configuration for a guardrail applied to the agent"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "tools",
+                "tool_approval_required",
+                "max_iterations",
+                "max_execution_time",
+                "evaluators",
+                "guardrails",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RunAgentRequestBodyTypedDict(TypedDict):
     key: str
@@ -2376,6 +3566,38 @@ class RunAgentRequestBody(BaseModel):
     metadata: Optional[Dict[str, Any]] = None
     r"""Optional metadata for the agent run as key-value pairs that will be included in traces"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "task_id",
+                "fallback_models",
+                "variables",
+                "identity",
+                "contact",
+                "thread",
+                "memory",
+                "description",
+                "system_prompt",
+                "memory_stores",
+                "knowledge_bases",
+                "team_of_agents",
+                "metadata",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 RunAgentKind = Literal["task",]
 r"""A2A entity type identifier"""
@@ -2478,6 +3700,22 @@ class RunAgentTaskStatus(BaseModel):
     message: Optional[RunAgentTaskStatusMessage] = None
     r"""Optional A2A message providing additional context about the current status"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["timestamp", "message"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class RunAgentA2ATaskResponseTypedDict(TypedDict):
     r"""Response format following the Agent-to-Agent (A2A) protocol. Returned when starting or continuing an agent task execution."""
@@ -2511,3 +3749,19 @@ class RunAgentA2ATaskResponse(BaseModel):

     metadata: Optional[Dict[str, Any]] = None
     r"""Task metadata containing workspace_id and trace_id for feedback and tracking"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["metadata"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m