orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/agents.py +186 -186
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5746 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1945 -383
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1381 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +407 -128
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2759 -1251
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1696 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1679 -230
- orq_ai_sdk/models/getpromptversionop.py +1676 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +805 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1690 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1462 -196
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1439 -194
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1968 -397
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2854 -1448
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +364 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
- orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
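Most of the churn in this release comes from a single pattern that the generated models now share: each Pydantic model gains a wrap-mode `@model_serializer` that omits optional fields the caller never set. The sketch below is a minimal, self-contained reproduction of that pattern; the model and field names are invented for illustration, and the module-level `UNSET_SENTINEL` here merely stands in for the SDK's own sentinel value:

```python
from typing import List, Optional

from pydantic import BaseModel, model_serializer

# Stand-in for the SDK's UNSET_SENTINEL (the marker for fields that were
# never assigned); the real constant lives in the SDK itself.
UNSET_SENTINEL = "~?~unset~?~"


class RetryConfig(BaseModel):
    # Hypothetical model mirroring StreamRunAgentModelConfigurationRetry.
    count: Optional[float] = 3
    on_codes: Optional[List[float]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["count", "on_codes"])
        serialized = handler(self)  # default Pydantic serialization
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                # Required fields are always emitted; optional fields
                # only when they carry a real value.
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


print(RetryConfig().model_dump())                # {'count': 3.0}
print(RetryConfig(on_codes=[429]).model_dump())  # {'count': 3.0, 'on_codes': [429.0]}
```

The practical effect is smaller request payloads: optional keys the caller never provided are dropped rather than being serialized as `null`. The largest single diff, reproduced below, is `orq_ai_sdk/models/streamrunagentop.py`.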
orq_ai_sdk/models/streamrunagentop.py

@@ -167,6 +167,22 @@ class StreamRunAgentResponseFormatAgentsJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class StreamRunAgentResponseFormatJSONSchemaTypedDict(TypedDict):
     r"""
@@ -297,6 +313,22 @@ class StreamRunAgentModelConfigurationStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 StreamRunAgentModelConfigurationThinkingTypedDict = TypeAliasType(
     "StreamRunAgentModelConfigurationThinkingTypedDict",
@@ -339,6 +371,22 @@ class StreamRunAgentToolChoice2(BaseModel):
     type: Optional[StreamRunAgentToolChoiceType] = None
     r"""The type of the tool. Currently, only function is supported."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 StreamRunAgentToolChoice1 = Literal[
     "none",
@@ -405,9 +453,161 @@ class StreamRunAgentModelConfigurationGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
+class StreamRunAgentModelConfigurationFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class StreamRunAgentModelConfigurationFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class StreamRunAgentModelConfigurationRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class StreamRunAgentModelConfigurationRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+StreamRunAgentModelConfigurationType = Literal["exact_match",]
+
+
+class StreamRunAgentModelConfigurationCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: StreamRunAgentModelConfigurationType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class StreamRunAgentModelConfigurationCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: StreamRunAgentModelConfigurationType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+StreamRunAgentLoadBalancerType = Literal["weight_based",]
+
+
+class StreamRunAgentLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class StreamRunAgentLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class StreamRunAgentLoadBalancer1TypedDict(TypedDict):
+    type: StreamRunAgentLoadBalancerType
+    models: List[StreamRunAgentLoadBalancerModelsTypedDict]
+
+
+class StreamRunAgentLoadBalancer1(BaseModel):
+    type: StreamRunAgentLoadBalancerType
+
+    models: List[StreamRunAgentLoadBalancerModels]
+
+
+StreamRunAgentModelConfigurationLoadBalancerTypedDict = (
+    StreamRunAgentLoadBalancer1TypedDict
+)
+r"""Load balancer configuration for the request."""
+
+
+StreamRunAgentModelConfigurationLoadBalancer = StreamRunAgentLoadBalancer1
+r"""Load balancer configuration for the request."""
+
+
+class StreamRunAgentModelConfigurationTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class StreamRunAgentModelConfigurationTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 class StreamRunAgentModelConfigurationParametersTypedDict(TypedDict):
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
 
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[StreamRunAgentModelConfigurationAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -466,11 +666,24 @@ class StreamRunAgentModelConfigurationParametersTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[StreamRunAgentModelConfigurationGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[StreamRunAgentModelConfigurationFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[StreamRunAgentModelConfigurationRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[StreamRunAgentModelConfigurationCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[StreamRunAgentModelConfigurationLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[StreamRunAgentModelConfigurationTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
 class StreamRunAgentModelConfigurationParameters(BaseModel):
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
 
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[StreamRunAgentModelConfigurationAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
 
@@ -551,77 +764,97 @@ class StreamRunAgentModelConfigurationParameters(BaseModel):
     guardrails: Optional[List[StreamRunAgentModelConfigurationGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[StreamRunAgentModelConfigurationFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[StreamRunAgentModelConfigurationRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[StreamRunAgentModelConfigurationCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[StreamRunAgentModelConfigurationLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[StreamRunAgentModelConfigurationTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
[... old lines 557-598 not preserved in the source diff ...]
+        optional_fields = set(
+            [
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
[... old lines 606-617 not preserved in the source diff ...]
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class StreamRunAgentModelConfigurationRetryTypedDict(TypedDict):
+class StreamRunAgentModelConfigurationAgentsRetryTypedDict(TypedDict):
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
     count: NotRequired[float]
@@ -630,7 +863,7 @@ class StreamRunAgentModelConfigurationRetryTypedDict(TypedDict):
     r"""HTTP status codes that trigger retry logic"""
 
 
-class StreamRunAgentModelConfigurationRetry(BaseModel):
+class StreamRunAgentModelConfigurationAgentsRetry(BaseModel):
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
     count: Optional[float] = 3
@@ -639,6 +872,22 @@ class StreamRunAgentModelConfigurationRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class StreamRunAgentModelConfiguration2TypedDict(TypedDict):
     r"""
@@ -650,7 +899,7 @@ class StreamRunAgentModelConfiguration2TypedDict(TypedDict):
     r"""A model ID string (e.g., `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`). Only models that support tool calling can be used with agents."""
     parameters: NotRequired[StreamRunAgentModelConfigurationParametersTypedDict]
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
-    retry: NotRequired[StreamRunAgentModelConfigurationRetryTypedDict]
+    retry: NotRequired[StreamRunAgentModelConfigurationAgentsRetryTypedDict]
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
 
@@ -666,9 +915,25 @@ class StreamRunAgentModelConfiguration2(BaseModel):
     parameters: Optional[StreamRunAgentModelConfigurationParameters] = None
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
 
-    retry: Optional[StreamRunAgentModelConfigurationRetry] = None
+    retry: Optional[StreamRunAgentModelConfigurationAgentsRetry] = None
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters", "retry"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 StreamRunAgentModelConfigurationTypedDict = TypeAliasType(
     "StreamRunAgentModelConfigurationTypedDict",
@@ -756,6 +1021,22 @@ class StreamRunAgentResponseFormatAgentsRequestRequestBodyJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class StreamRunAgentResponseFormatAgentsRequestJSONSchemaTypedDict(TypedDict):
     r"""
@@ -890,6 +1171,22 @@ class StreamRunAgentFallbackModelConfigurationStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 StreamRunAgentFallbackModelConfigurationThinkingTypedDict = TypeAliasType(
     "StreamRunAgentFallbackModelConfigurationThinkingTypedDict",
@@ -932,6 +1229,22 @@ class StreamRunAgentToolChoiceAgents2(BaseModel):
     type: Optional[StreamRunAgentToolChoiceAgentsType] = None
     r"""The type of the tool. Currently, only function is supported."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 StreamRunAgentToolChoiceAgents1 = Literal[
     "none",
@@ -999,65 +1312,217 @@ class StreamRunAgentFallbackModelConfigurationGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
-class StreamRunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
-    r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
+class StreamRunAgentFallbackModelConfigurationFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
 
-    audio: NotRequired[Nullable[StreamRunAgentFallbackModelConfigurationAudioTypedDict]]
-    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
-    frequency_penalty: NotRequired[Nullable[float]]
-    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
-    max_tokens: NotRequired[Nullable[int]]
-    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
 
-    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
-    """
-    max_completion_tokens: NotRequired[Nullable[int]]
-    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
-    logprobs: NotRequired[Nullable[bool]]
-    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
-    top_logprobs: NotRequired[Nullable[int]]
-    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
-    n: NotRequired[Nullable[int]]
-    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
-    presence_penalty: NotRequired[Nullable[float]]
-    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
-    response_format: NotRequired[
-        StreamRunAgentFallbackModelConfigurationResponseFormatTypedDict
-    ]
-    r"""An object specifying the format that the model must output"""
-    reasoning_effort: NotRequired[
-        StreamRunAgentFallbackModelConfigurationReasoningEffort
-    ]
-    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+class StreamRunAgentFallbackModelConfigurationFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
 
-    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
-    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
-    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
 
-    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
-    """
[... old lines 1040-1053 not preserved in the source diff ...]
-    r"""
[... old lines 1055-1060 not preserved in the source diff ...]
+class StreamRunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class StreamRunAgentFallbackModelConfigurationRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+StreamRunAgentFallbackModelConfigurationType = Literal["exact_match",]
+
+
+class StreamRunAgentFallbackModelConfigurationCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: StreamRunAgentFallbackModelConfigurationType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class StreamRunAgentFallbackModelConfigurationCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: StreamRunAgentFallbackModelConfigurationType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+StreamRunAgentLoadBalancerAgentsType = Literal["weight_based",]
+
+
+class StreamRunAgentLoadBalancerAgentsModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class StreamRunAgentLoadBalancerAgentsModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class StreamRunAgentLoadBalancerAgents1TypedDict(TypedDict):
+    type: StreamRunAgentLoadBalancerAgentsType
+    models: List[StreamRunAgentLoadBalancerAgentsModelsTypedDict]
+
+
+class StreamRunAgentLoadBalancerAgents1(BaseModel):
+    type: StreamRunAgentLoadBalancerAgentsType
+
+    models: List[StreamRunAgentLoadBalancerAgentsModels]
+
+
+StreamRunAgentFallbackModelConfigurationLoadBalancerTypedDict = (
+    StreamRunAgentLoadBalancerAgents1TypedDict
+)
+r"""Load balancer configuration for the request."""
+
+
+StreamRunAgentFallbackModelConfigurationLoadBalancer = StreamRunAgentLoadBalancerAgents1
+r"""Load balancer configuration for the request."""
+
+
+class StreamRunAgentFallbackModelConfigurationTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class StreamRunAgentFallbackModelConfigurationTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class StreamRunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
+    r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
+
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+    audio: NotRequired[Nullable[StreamRunAgentFallbackModelConfigurationAudioTypedDict]]
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+    frequency_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+    max_tokens: NotRequired[Nullable[int]]
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+    max_completion_tokens: NotRequired[Nullable[int]]
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+    logprobs: NotRequired[Nullable[bool]]
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+    top_logprobs: NotRequired[Nullable[int]]
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+    n: NotRequired[Nullable[int]]
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+    presence_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+    response_format: NotRequired[
+        StreamRunAgentFallbackModelConfigurationResponseFormatTypedDict
+    ]
+    r"""An object specifying the format that the model must output"""
+    reasoning_effort: NotRequired[
+        StreamRunAgentFallbackModelConfigurationReasoningEffort
+    ]
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+    verbosity: NotRequired[str]
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+    seed: NotRequired[Nullable[float]]
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+    stop: NotRequired[Nullable[StreamRunAgentFallbackModelConfigurationStopTypedDict]]
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+    stream_options: NotRequired[
+        Nullable[StreamRunAgentFallbackModelConfigurationStreamOptionsTypedDict]
+    ]
+    r"""Options for streaming response. Only set this when you set stream: true."""
+    thinking: NotRequired[StreamRunAgentFallbackModelConfigurationThinkingTypedDict]
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+    top_p: NotRequired[Nullable[float]]
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+    top_k: NotRequired[Nullable[float]]
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+    tool_choice: NotRequired[
+        StreamRunAgentFallbackModelConfigurationToolChoiceTypedDict
+    ]
+    r"""Controls which (if any) tool is called by the model."""
     parallel_tool_calls: NotRequired[bool]
     r"""Whether to enable parallel function calling during tool use."""
     modalities: NotRequired[
@@ -1068,11 +1533,28 @@ class StreamRunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
         List[StreamRunAgentFallbackModelConfigurationGuardrailsTypedDict]
     ]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[
+        List[StreamRunAgentFallbackModelConfigurationFallbacksTypedDict]
+    ]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[StreamRunAgentFallbackModelConfigurationRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[StreamRunAgentFallbackModelConfigurationCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[
+        StreamRunAgentFallbackModelConfigurationLoadBalancerTypedDict
+    ]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[StreamRunAgentFallbackModelConfigurationTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
 class StreamRunAgentFallbackModelConfigurationParameters(BaseModel):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
 
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[StreamRunAgentFallbackModelConfigurationAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
 
@@ -1159,77 +1641,97 @@ class StreamRunAgentFallbackModelConfigurationParameters(BaseModel):
     )
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[StreamRunAgentFallbackModelConfigurationFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[StreamRunAgentFallbackModelConfigurationRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[StreamRunAgentFallbackModelConfigurationCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[StreamRunAgentFallbackModelConfigurationLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[StreamRunAgentFallbackModelConfigurationTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields =
[... old lines 1165-1206 not preserved in the source diff ...]
+        optional_fields = set(
+            [
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
[... old lines 1214-1225 not preserved in the source diff ...]
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class StreamRunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
+class StreamRunAgentFallbackModelConfigurationAgentsRetryTypedDict(TypedDict):
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
 
     count: NotRequired[float]
@@ -1238,7 +1740,7 @@ class StreamRunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
     r"""HTTP status codes that trigger retry logic"""
 
 
-class StreamRunAgentFallbackModelConfigurationRetry(BaseModel):
+class StreamRunAgentFallbackModelConfigurationAgentsRetry(BaseModel):
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
 
     count: Optional[float] = 3
@@ -1247,6 +1749,22 @@ class StreamRunAgentFallbackModelConfigurationRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class StreamRunAgentFallbackModelConfiguration2TypedDict(TypedDict):
     r"""Fallback model configuration with optional parameters and retry settings."""
@@ -1255,7 +1773,7 @@ class StreamRunAgentFallbackModelConfiguration2TypedDict(TypedDict):
     r"""A fallback model ID string. Must support tool calling."""
     parameters: NotRequired[StreamRunAgentFallbackModelConfigurationParametersTypedDict]
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
-    retry: NotRequired[StreamRunAgentFallbackModelConfigurationRetryTypedDict]
+    retry: NotRequired[StreamRunAgentFallbackModelConfigurationAgentsRetryTypedDict]
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
 
 
@@ -1268,9 +1786,25 @@ class StreamRunAgentFallbackModelConfiguration2(BaseModel):
     parameters: Optional[StreamRunAgentFallbackModelConfigurationParameters] = None
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""

-    retry: Optional[StreamRunAgentFallbackModelConfigurationRetry] = None
+    retry: Optional[StreamRunAgentFallbackModelConfigurationAgentsRetry] = None
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters", "retry"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentFallbackModelConfigurationTypedDict = TypeAliasType(
     "StreamRunAgentFallbackModelConfigurationTypedDict",
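With the serializer in place, a fallback-model configuration serializes without its unset members. A hedged usage sketch (the class name comes from this diff; the re-export from `orq_ai_sdk.models` is assumed):

```python
# Assumed import path; the class itself is defined in the hunks above.
from orq_ai_sdk.models import StreamRunAgentFallbackModelConfigurationAgentsRetry

retry = StreamRunAgentFallbackModelConfigurationAgentsRetry(on_codes=[429, 503])
# count keeps its default of 3; unset optionals are dropped on dump.
print(retry.model_dump())  # {'count': 3.0, 'on_codes': [429.0, 503.0]}
```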
@@ -1355,6 +1889,22 @@ class StreamRunAgentA2AMessage(BaseModel):
     message_id: Annotated[Optional[str], pydantic.Field(alias="messageId")] = None
     r"""Optional A2A message ID in ULID format"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["messageId"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentIdentityTypedDict(TypedDict):
     r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
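Note the `f.alias or n` lookup in these serializers: aliased fields such as `message_id` are keyed by their wire name (`messageId`) once the model is dumped by alias. A standalone sketch of that interaction (simplified; the SDK additionally filters its `UNSET_SENTINEL`):

```python
from typing import Annotated, Optional

import pydantic
from pydantic import BaseModel, model_serializer


class A2AMessage(BaseModel):
    message_id: Annotated[Optional[str], pydantic.Field(alias="messageId")] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = {"messageId"}  # wire names, hence the alias lookup
        serialized = handler(self)
        m = {}

        for name, field in type(self).model_fields.items():
            key = field.alias or name  # 'messageId', not 'message_id'
            if (val := serialized.get(key)) is not None or key not in optional_fields:
                m[key] = val

        return m


print(A2AMessage().model_dump(by_alias=True))                    # {}
print(A2AMessage(messageId="01J9ZX").model_dump(by_alias=True))  # {'messageId': '01J9ZX'}
```

The sketch assumes dumping with `by_alias=True`; without it the handler keys the dict by field name and the alias lookup would miss, so the alias-aware filtering only makes sense for alias-keyed serialization.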
@@ -1394,6 +1944,22 @@ class StreamRunAgentIdentity(BaseModel):
     tags: Optional[List[str]] = None
     r"""A list of tags associated with the contact"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 @deprecated(
     "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
@@ -1439,6 +2005,22 @@ class StreamRunAgentContact(BaseModel):
     tags: Optional[List[str]] = None
     r"""A list of tags associated with the contact"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentThreadTypedDict(TypedDict):
     r"""Thread information to group related requests"""
@@ -1458,6 +2040,22 @@ class StreamRunAgentThread(BaseModel):
     tags: Optional[List[str]] = None
     r"""Optional tags to differentiate or categorize threads"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentMemoryTypedDict(TypedDict):
     r"""Memory configuration for the agent execution. Used to associate memory stores with specific entities like users or sessions."""
@@ -1497,8 +2095,24 @@ class StreamRunAgentTeamOfAgents(BaseModel):
     role: Optional[str] = None
     r"""The role of the agent in this context. This is used to give extra information to the leader to help it decide which agent to hand off to."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["role"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)

-StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type = Literal[
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type = Literal[
     "mcp",
 ]

@@ -1513,29 +2127,61 @@ class StreamRunAgentAgentToolInputRunAgentsHeaders(BaseModel):

     encrypted: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["encrypted"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

-StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType = Literal[
+StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType = Literal[
     "object",
 ]


-class StreamRunAgentAgentToolInputRunSchemaTypedDict(TypedDict):
-    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType
+class StreamRunAgentAgentToolInputRunAgentsSchemaTypedDict(TypedDict):
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType
     properties: NotRequired[Dict[str, Any]]
     required: NotRequired[List[str]]


-class StreamRunAgentAgentToolInputRunSchema(BaseModel):
-    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType
+class StreamRunAgentAgentToolInputRunAgentsSchema(BaseModel):
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType

     properties: Optional[Dict[str, Any]] = None

     required: Optional[List[str]] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["properties", "required"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunToolsTypedDict(TypedDict):
     name: str
-    schema_: StreamRunAgentAgentToolInputRunSchemaTypedDict
+    schema_: StreamRunAgentAgentToolInputRunAgentsSchemaTypedDict
     id: NotRequired[str]
     description: NotRequired[str]

@@ -1543,12 +2189,30 @@ class AgentToolInputRunToolsTypedDict(TypedDict):
 class AgentToolInputRunTools(BaseModel):
     name: str

-    schema_: Annotated[
+    schema_: Annotated[
+        StreamRunAgentAgentToolInputRunAgentsSchema, pydantic.Field(alias="schema")
+    ]

-    id: Optional[str] = "
+    id: Optional[str] = "01KG2RZQB6NY02X6PDEWS9295S"

     description: Optional[str] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "description"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 AgentToolInputRunConnectionType = Literal[
     "http",
@@ -1583,11 +2247,27 @@ class AgentToolInputRunMcp(BaseModel):
     headers: Optional[Dict[str, StreamRunAgentAgentToolInputRunAgentsHeaders]] = None
     r"""HTTP headers for MCP server requests with encryption support"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["headers"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunMCPToolRunTypedDict(TypedDict):
     r"""MCP tool with inline definition for on-the-fly creation in run endpoint"""

-    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type
     key: str
     r"""Unique key of the tool as it will be displayed in the UI"""
     description: str
@@ -1601,7 +2281,7 @@ class AgentToolInputRunMCPToolRunTypedDict(TypedDict):
 class AgentToolInputRunMCPToolRun(BaseModel):
     r"""MCP tool with inline definition for on-the-fly creation in run endpoint"""

-    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type

     key: str
     r"""Unique key of the tool as it will be displayed in the UI"""
@@ -1617,6 +2297,157 @@ class AgentToolInputRunMCPToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type = Literal[
+    "json_schema",
+]
+
+
+class StreamRunAgentAgentToolInputRunSchemaTypedDict(TypedDict):
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    type: str
+    r"""The JSON Schema type"""
+    properties: Dict[str, Any]
+    r"""The properties of the JSON Schema object"""
+    required: List[str]
+    r"""Array of required property names"""
+
+
+class StreamRunAgentAgentToolInputRunSchema(BaseModel):
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    model_config = ConfigDict(
+        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
+    )
+    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
+
+    type: str
+    r"""The JSON Schema type"""
+
+    properties: Dict[str, Any]
+    r"""The properties of the JSON Schema object"""
+
+    required: List[str]
+    r"""Array of required property names"""
+
+    @property
+    def additional_properties(self):
+        return self.__pydantic_extra__
+
+    @additional_properties.setter
+    def additional_properties(self, value):
+        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class StreamRunAgentAgentToolInputRunJSONSchemaTypedDict(TypedDict):
+    name: str
+    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
+    description: str
+    r"""A description of what the response format is for. This will be shown to the user."""
+    schema_: StreamRunAgentAgentToolInputRunSchemaTypedDict
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+    strict: NotRequired[bool]
+    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. Only compatible with `OpenAI` models."""
+
+
+class StreamRunAgentAgentToolInputRunJSONSchema(BaseModel):
+    name: str
+    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
+
+    description: str
+    r"""A description of what the response format is for. This will be shown to the user."""
+
+    schema_: Annotated[
+        StreamRunAgentAgentToolInputRunSchema, pydantic.Field(alias="schema")
+    ]
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    strict: Optional[bool] = None
+    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. Only compatible with `OpenAI` models."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class AgentToolInputRunJSONSchemaToolRunTypedDict(TypedDict):
+    r"""JSON Schema tool with inline definition for on-the-fly creation in run endpoint"""
+
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+    key: str
+    r"""Unique key of the tool as it will be displayed in the UI"""
+    description: str
+    r"""A description of the tool, used by the model to choose when and how to call the tool. We do recommend using the `description` field as accurate as possible to give enough context to the model to make the right decision."""
+    json_schema: StreamRunAgentAgentToolInputRunJSONSchemaTypedDict
+    id: NotRequired[str]
+    display_name: NotRequired[str]
+    requires_approval: NotRequired[bool]
+
+
+class AgentToolInputRunJSONSchemaToolRun(BaseModel):
+    r"""JSON Schema tool with inline definition for on-the-fly creation in run endpoint"""
+
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+
+    key: str
+    r"""Unique key of the tool as it will be displayed in the UI"""
+
+    description: str
+    r"""A description of the tool, used by the model to choose when and how to call the tool. We do recommend using the `description` field as accurate as possible to give enough context to the model to make the right decision."""
+
+    json_schema: StreamRunAgentAgentToolInputRunJSONSchema
+
+    id: Annotated[Optional[str], pydantic.Field(alias="_id")] = None
+
+    display_name: Optional[str] = None
+
+    requires_approval: Optional[bool] = False
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools14Type = Literal[
     "function",
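The bulk of this hunk introduces the new `json_schema` tool type for agent runs: a response-format tool whose inline definition carries a JSON Schema. A payload sketch matching the TypedDicts above (all values are illustrative; note the wire key is `"schema"`, the alias of the `schema_` field):

```python
# Illustrative json_schema tool payload; the structure follows the TypedDicts
# above, the values are invented for the example.
json_schema_tool = {
    "type": "json_schema",
    "key": "extract_invoice",
    "description": "Extract structured invoice fields from the conversation.",
    "json_schema": {
        "name": "invoice",
        "description": "Structured invoice data.",
        "schema": {
            "type": "object",
            "properties": {
                "total": {"type": "number"},
                "currency": {"type": "string"},
            },
            "required": ["total", "currency"],
        },
        "strict": True,  # strict adherence; OpenAI models only, per the docstring
    },
    "requires_approval": False,
}
```

Because `StreamRunAgentAgentToolInputRunSchema` is configured with `extra="allow"`, additional JSON Schema keywords beyond `type`/`properties`/`required` pass through as extra properties.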
@@ -1690,6 +2521,22 @@ class StreamRunAgentAgentToolInputRunFunction(BaseModel):
     parameters: Optional[StreamRunAgentAgentToolInputRunAgentsParameters] = None
     r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict", "parameters"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunFunctionToolRunTypedDict(TypedDict):
     r"""Function tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1722,6 +2569,24 @@ class AgentToolInputRunFunctionToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            ["_id", "display_name", "description", "requires_approval"]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools13Type = Literal[
     "code",
@@ -1788,8 +2653,24 @@ class AgentToolInputRunCodeTool(BaseModel):
     code: str
     r"""The code to execute."""

-    parameters: Optional[StreamRunAgentAgentToolInputRunParameters] = None
-    r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+    parameters: Optional[StreamRunAgentAgentToolInputRunParameters] = None
+    r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m


 class AgentToolInputRunCodeToolRunTypedDict(TypedDict):
@@ -1825,6 +2706,22 @@ class AgentToolInputRunCodeToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools12Type = Literal[
     "http",
@@ -1850,6 +2747,22 @@ class StreamRunAgentHeaders2(BaseModel):

     encrypted: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["encrypted"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunHeadersTypedDict = TypeAliasType(
     "StreamRunAgentAgentToolInputRunHeadersTypedDict",
@@ -1890,6 +2803,22 @@ class AgentToolInputRunBlueprint(BaseModel):
     body: Optional[Dict[str, Any]] = None
     r"""The body to send with the request."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["headers", "body"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools12HTTPType = (
     Literal[
@@ -1937,6 +2866,22 @@ class AgentToolInputRunArguments(BaseModel):
     default_value: Optional[AgentToolInputRunDefaultValue] = None
     r"""The default value of the argument."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["send_to_model", "default_value"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunHTTPTypedDict(TypedDict):
     blueprint: AgentToolInputRunBlueprintTypedDict
@@ -1952,6 +2897,22 @@ class AgentToolInputRunHTTP(BaseModel):
     arguments: Optional[Dict[str, AgentToolInputRunArguments]] = None
     r"""The arguments to send with the request. The keys will be used to replace the placeholders in the `blueprint` field."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunHTTPToolRunTypedDict(TypedDict):
     r"""HTTP tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1986,6 +2947,22 @@ class AgentToolInputRunHTTPToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools11Type = Literal[
     "current_date",
@@ -2008,6 +2985,22 @@ class StreamRunAgentAgentToolInputRunCurrentDateTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools10Type = Literal[
     "query_knowledge_base",
@@ -2030,6 +3023,22 @@ class StreamRunAgentAgentToolInputRunQueryKnowledgeBaseTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools9Type = Literal[
     "retrieve_knowledge_bases",
@@ -2052,6 +3061,22 @@ class StreamRunAgentAgentToolInputRunRetrieveKnowledgeBasesTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools8Type = Literal[
     "delete_memory_document",
@@ -2074,6 +3099,22 @@ class StreamRunAgentAgentToolInputRunDeleteMemoryDocumentTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools7Type = Literal[
     "retrieve_memory_stores",
@@ -2096,6 +3137,22 @@ class StreamRunAgentAgentToolInputRunRetrieveMemoryStoresTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsToolsType = Literal[
     "write_memory_store",
@@ -2118,6 +3175,22 @@ class StreamRunAgentAgentToolInputRunWriteMemoryStoreTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsType = Literal[
     "query_memory_store",
@@ -2140,6 +3213,22 @@ class StreamRunAgentAgentToolInputRunQueryMemoryStoreTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodyType = Literal[
     "retrieve_agents",
@@ -2162,6 +3251,22 @@ class StreamRunAgentAgentToolInputRunRetrieveAgentsTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestType = Literal["call_sub_agent",]

@@ -2182,6 +3287,22 @@ class StreamRunAgentAgentToolInputRunCallSubAgentTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsType = Literal["web_scraper",]

@@ -2202,6 +3323,22 @@ class StreamRunAgentAgentToolInputRunWebScraperTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunType = Literal["google_search",]

@@ -2222,6 +3359,22 @@ class StreamRunAgentAgentToolInputRunGoogleSearchTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunTypedDict = TypeAliasType(
     "StreamRunAgentAgentToolInputRunTypedDict",
@@ -2240,10 +3393,11 @@ StreamRunAgentAgentToolInputRunTypedDict = TypeAliasType(
         AgentToolInputRunHTTPToolRunTypedDict,
         AgentToolInputRunCodeToolRunTypedDict,
         AgentToolInputRunFunctionToolRunTypedDict,
+        AgentToolInputRunJSONSchemaToolRunTypedDict,
         AgentToolInputRunMCPToolRunTypedDict,
     ],
 )
-r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, MCP) support full inline definitions for on-the-fly creation."""
+r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, JSON Schema, MCP) support full inline definitions for on-the-fly creation."""


 StreamRunAgentAgentToolInputRun = Annotated[
@@ -2286,11 +3440,12 @@ StreamRunAgentAgentToolInputRun = Annotated[
         Annotated[AgentToolInputRunHTTPToolRun, Tag("http")],
         Annotated[AgentToolInputRunCodeToolRun, Tag("code")],
         Annotated[AgentToolInputRunFunctionToolRun, Tag("function")],
+        Annotated[AgentToolInputRunJSONSchemaToolRun, Tag("json_schema")],
         Annotated[AgentToolInputRunMCPToolRun, Tag("mcp")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
-r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, MCP) support full inline definitions for on-the-fly creation."""
+r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, JSON Schema, MCP) support full inline definitions for on-the-fly creation."""


 StreamRunAgentToolApprovalRequired = Literal[
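Both the TypedDict union and the pydantic `Annotated` union gain a `json_schema` branch, discriminated on the `type` field. A standalone sketch of the same tag-based dispatch (two branches instead of the sixteen above, and a plain lambda instead of the SDK's `get_discriminator` helper):

```python
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Discriminator, Tag, TypeAdapter


class FunctionTool(BaseModel):
    type: Literal["function"]
    key: str


class JSONSchemaTool(BaseModel):
    type: Literal["json_schema"]
    key: str


Tool = Annotated[
    Union[
        Annotated[FunctionTool, Tag("function")],
        Annotated[JSONSchemaTool, Tag("json_schema")],
    ],
    # The callable discriminator reads the tag from dicts and model instances.
    Discriminator(lambda v: v.get("type") if isinstance(v, dict) else v.type),
]

tool = TypeAdapter(Tool).validate_python({"type": "json_schema", "key": "extract"})
print(type(tool).__name__)  # JSONSchemaTool
```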
@@ -2327,6 +3482,22 @@ class StreamRunAgentEvaluators(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentsExecuteOn = Literal[
     "input",
@@ -2354,6 +3525,22 @@ class StreamRunAgentGuardrails(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentSettingsTypedDict(TypedDict):
     tools: NotRequired[List[StreamRunAgentAgentToolInputRunTypedDict]]
@@ -2389,6 +3576,31 @@ class StreamRunAgentSettings(BaseModel):
     guardrails: Optional[List[StreamRunAgentGuardrails]] = None
     r"""Configuration for a guardrail applied to the agent"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "tools",
+                "tool_approval_required",
+                "max_iterations",
+                "max_execution_time",
+                "evaluators",
+                "guardrails",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentRequestBodyTypedDict(TypedDict):
     key: str
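Per the union's docstring, built-in tools need only a `type` (plus an optional `requires_approval`), while custom tools carry full inline definitions. A settings sketch mixing both (illustrative values; the `"id"` key in the evaluator entry is assumed, not taken from this diff):

```python
# Illustrative settings payload; shapes follow StreamRunAgentSettingsTypedDict
# and the tool unions above.
settings = {
    "tools": [
        {"type": "google_search"},  # built-in tool: type alone is enough
        {"type": "current_date", "requires_approval": False},
        json_schema_tool,  # custom inline tool from the earlier sketch
    ],
    "evaluators": [
        # sample_rate defaults to 50, i.e. roughly half of executions
        {"id": "eval_123", "sample_rate": 25},  # "id" key assumed
    ],
}
```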
@@ -2514,6 +3726,39 @@ class StreamRunAgentRequestBody(BaseModel):
     stream_timeout_seconds: Optional[float] = None
     r"""Stream timeout in seconds (1-3600). Default: 1800 (30 minutes)"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "task_id",
+                "fallback_models",
+                "variables",
+                "identity",
+                "contact",
+                "thread",
+                "memory",
+                "description",
+                "system_prompt",
+                "memory_stores",
+                "knowledge_bases",
+                "team_of_agents",
+                "metadata",
+                "stream_timeout_seconds",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentAgentsResponseBodyData(BaseModel):
     message: str