orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (166)
  1. orq_ai_sdk/_version.py +3 -3
  2. orq_ai_sdk/agents.py +186 -186
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +438 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +364 -0
  10. orq_ai_sdk/embeddings.py +344 -0
  11. orq_ai_sdk/generations.py +370 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5746 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1945 -383
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1381 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +2078 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +579 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +715 -0
  41. orq_ai_sdk/models/createimageop.py +407 -128
  42. orq_ai_sdk/models/createimagevariationop.py +706 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2759 -1251
  49. orq_ai_sdk/models/creatererankop.py +608 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +466 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +732 -0
  54. orq_ai_sdk/models/createtranslationop.py +702 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1696 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1679 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1676 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +805 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1690 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1462 -196
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1439 -194
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1968 -397
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2854 -1448
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +666 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +330 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +333 -0
  158. orq_ai_sdk/transcriptions.py +416 -0
  159. orq_ai_sdk/translations.py +384 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +364 -0
  162. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
  163. orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
@@ -102,6 +102,22 @@ class RunAgentResponseFormatAgentsJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class RunAgentResponseFormatJSONSchemaTypedDict(TypedDict):
     r"""
@@ -232,6 +248,22 @@ class RunAgentModelConfigurationStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentModelConfigurationThinkingTypedDict = TypeAliasType(
     "RunAgentModelConfigurationThinkingTypedDict",
@@ -274,6 +306,22 @@ class RunAgentToolChoice2(BaseModel):
     type: Optional[RunAgentToolChoiceType] = None
     r"""The type of the tool. Currently, only function is supported."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentToolChoice1 = Literal[
     "none",
@@ -340,9 +388,159 @@ class RunAgentModelConfigurationGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
+class RunAgentModelConfigurationFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class RunAgentModelConfigurationFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class RunAgentModelConfigurationRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class RunAgentModelConfigurationRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RunAgentModelConfigurationType = Literal["exact_match",]
+
+
+class RunAgentModelConfigurationCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: RunAgentModelConfigurationType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class RunAgentModelConfigurationCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: RunAgentModelConfigurationType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RunAgentLoadBalancerType = Literal["weight_based",]
+
+
+class RunAgentLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class RunAgentLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class RunAgentLoadBalancer1TypedDict(TypedDict):
+    type: RunAgentLoadBalancerType
+    models: List[RunAgentLoadBalancerModelsTypedDict]
+
+
+class RunAgentLoadBalancer1(BaseModel):
+    type: RunAgentLoadBalancerType
+
+    models: List[RunAgentLoadBalancerModels]
+
+
+RunAgentModelConfigurationLoadBalancerTypedDict = RunAgentLoadBalancer1TypedDict
+r"""Load balancer configuration for the request."""
+
+
+RunAgentModelConfigurationLoadBalancer = RunAgentLoadBalancer1
+r"""Load balancer configuration for the request."""
+
+
+class RunAgentModelConfigurationTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class RunAgentModelConfigurationTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 class RunAgentModelConfigurationParametersTypedDict(TypedDict):
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
 
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[RunAgentModelConfigurationAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
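Note: together, the new `fallbacks`, `retry`, `cache`, `load_balancer`, and `timeout` keys expose AI-gateway routing controls directly on `parameters`. A sketch of a payload these TypedDicts would accept (values illustrative, not from the diff):

    parameters = {
        "temperature": 0.2,
        "fallbacks": [{"model": "anthropic/claude-haiku-4-5-20251001"}],
        "retry": {"count": 3, "on_codes": [429, 500, 502, 503, 504]},
        "cache": {"type": "exact_match", "ttl": 1800},  # ttl in seconds, max 259200 (3 days)
        "load_balancer": {
            "type": "weight_based",
            "models": [
                {"model": "openai/gpt-4o", "weight": 0.7},
                {"model": "openai/gpt-4o-mini", "weight": 0.3},
            ],
        },
        "timeout": {"call_timeout": 30000},  # milliseconds
    }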
@@ -399,11 +597,24 @@ class RunAgentModelConfigurationParametersTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[RunAgentModelConfigurationGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[RunAgentModelConfigurationFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[RunAgentModelConfigurationRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[RunAgentModelConfigurationCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[RunAgentModelConfigurationLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[RunAgentModelConfigurationTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
 class RunAgentModelConfigurationParameters(BaseModel):
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
 
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[RunAgentModelConfigurationAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
 
@@ -480,77 +691,97 @@ class RunAgentModelConfigurationParameters(BaseModel):
     guardrails: Optional[List[RunAgentModelConfigurationGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[RunAgentModelConfigurationFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[RunAgentModelConfigurationRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[RunAgentModelConfigurationCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[RunAgentModelConfigurationLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[RunAgentModelConfigurationTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "audio",
-            "frequency_penalty",
-            "max_tokens",
-            "max_completion_tokens",
-            "logprobs",
-            "top_logprobs",
-            "n",
-            "presence_penalty",
-            "response_format",
-            "reasoning_effort",
-            "verbosity",
-            "seed",
-            "stop",
-            "stream_options",
-            "thinking",
-            "temperature",
-            "top_p",
-            "top_k",
-            "tool_choice",
-            "parallel_tool_calls",
-            "modalities",
-            "guardrails",
-        ]
-        nullable_fields = [
-            "audio",
-            "frequency_penalty",
-            "max_tokens",
-            "max_completion_tokens",
-            "logprobs",
-            "top_logprobs",
-            "n",
-            "presence_penalty",
-            "seed",
-            "stop",
-            "stream_options",
-            "temperature",
-            "top_p",
-            "top_k",
-            "modalities",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class RunAgentModelConfigurationRetryTypedDict(TypedDict):
+class RunAgentModelConfigurationAgentsRetryTypedDict(TypedDict):
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
     count: NotRequired[float]
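Note: the rewritten serializer above replaces the old `null_default_fields` bookkeeping with a single `is_nullable_and_explicitly_set` check: a nullable field is emitted as null only when the caller actually assigned it, while untouched fields disappear from the payload. A standalone sketch of that tri-state behavior (hypothetical model; the UNSET-sentinel handling is elided for brevity):

    from typing import Optional
    from pydantic import BaseModel, model_serializer

    class Params(BaseModel):
        temperature: Optional[float] = None
        top_p: Optional[float] = None

        @model_serializer(mode="wrap")
        def serialize_model(self, handler):
            optional_fields = {"temperature", "top_p"}
            nullable_fields = {"temperature", "top_p"}
            serialized = handler(self)
            m = {}
            for n, f in type(self).model_fields.items():
                k = f.alias or n
                val = serialized.get(k)
                # null survives only if the field was explicitly set by the caller
                is_nullable_and_explicitly_set = k in nullable_fields and bool(
                    self.__pydantic_fields_set__.intersection({n})
                )
                if val is not None or k not in optional_fields or is_nullable_and_explicitly_set:
                    m[k] = val
            return m

    print(Params(temperature=None).model_dump())  # {'temperature': None} -- explicit null kept
    print(Params().model_dump())                  # {} -- unset fields omitted entirely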
@@ -559,7 +790,7 @@ class RunAgentModelConfigurationRetryTypedDict(TypedDict):
     r"""HTTP status codes that trigger retry logic"""
 
 
-class RunAgentModelConfigurationRetry(BaseModel):
+class RunAgentModelConfigurationAgentsRetry(BaseModel):
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
     count: Optional[float] = 3
@@ -568,6 +799,22 @@ class RunAgentModelConfigurationRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class RunAgentModelConfiguration2TypedDict(TypedDict):
     r"""
@@ -579,7 +826,7 @@ class RunAgentModelConfiguration2TypedDict(TypedDict):
     r"""A model ID string (e.g., `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`). Only models that support tool calling can be used with agents."""
     parameters: NotRequired[RunAgentModelConfigurationParametersTypedDict]
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
-    retry: NotRequired[RunAgentModelConfigurationRetryTypedDict]
+    retry: NotRequired[RunAgentModelConfigurationAgentsRetryTypedDict]
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
 
@@ -595,9 +842,25 @@ class RunAgentModelConfiguration2(BaseModel):
     parameters: Optional[RunAgentModelConfigurationParameters] = None
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
 
-    retry: Optional[RunAgentModelConfigurationRetry] = None
+    retry: Optional[RunAgentModelConfigurationAgentsRetry] = None
     r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters", "retry"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentModelConfigurationTypedDict = TypeAliasType(
     "RunAgentModelConfigurationTypedDict",
@@ -683,6 +946,22 @@ class RunAgentResponseFormatAgentsRequestRequestBodyJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class RunAgentResponseFormatAgentsRequestJSONSchemaTypedDict(TypedDict):
     r"""
@@ -815,6 +1094,22 @@ class RunAgentFallbackModelConfigurationStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentFallbackModelConfigurationThinkingTypedDict = TypeAliasType(
     "RunAgentFallbackModelConfigurationThinkingTypedDict",
@@ -857,6 +1152,22 @@ class RunAgentToolChoiceAgents2(BaseModel):
     type: Optional[RunAgentToolChoiceAgentsType] = None
     r"""The type of the tool. Currently, only function is supported."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentToolChoiceAgents1 = Literal[
     "none",
@@ -923,74 +1234,239 @@ class RunAgentFallbackModelConfigurationGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
-class RunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
-    r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
+class RunAgentFallbackModelConfigurationFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
 
-    audio: NotRequired[Nullable[RunAgentFallbackModelConfigurationAudioTypedDict]]
-    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
-    frequency_penalty: NotRequired[Nullable[float]]
-    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
-    max_tokens: NotRequired[Nullable[int]]
-    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
 
-    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
-    """
-    max_completion_tokens: NotRequired[Nullable[int]]
-    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
-    logprobs: NotRequired[Nullable[bool]]
-    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
-    top_logprobs: NotRequired[Nullable[int]]
-    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
-    n: NotRequired[Nullable[int]]
-    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
-    presence_penalty: NotRequired[Nullable[float]]
-    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
-    response_format: NotRequired[
-        RunAgentFallbackModelConfigurationResponseFormatTypedDict
-    ]
-    r"""An object specifying the format that the model must output"""
-    reasoning_effort: NotRequired[RunAgentFallbackModelConfigurationReasoningEffort]
-    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+class RunAgentFallbackModelConfigurationFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
 
-    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
-    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
-    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
 
-    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
-    """
-    verbosity: NotRequired[str]
-    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
-    seed: NotRequired[Nullable[float]]
-    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
-    stop: NotRequired[Nullable[RunAgentFallbackModelConfigurationStopTypedDict]]
-    r"""Up to 4 sequences where the API will stop generating further tokens."""
-    stream_options: NotRequired[
-        Nullable[RunAgentFallbackModelConfigurationStreamOptionsTypedDict]
-    ]
-    r"""Options for streaming response. Only set this when you set stream: true."""
-    thinking: NotRequired[RunAgentFallbackModelConfigurationThinkingTypedDict]
-    temperature: NotRequired[Nullable[float]]
-    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
-    top_p: NotRequired[Nullable[float]]
-    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
-    top_k: NotRequired[Nullable[float]]
-    r"""Limits the model to consider only the top k most likely tokens at each step."""
-    tool_choice: NotRequired[RunAgentFallbackModelConfigurationToolChoiceTypedDict]
-    r"""Controls which (if any) tool is called by the model."""
-    parallel_tool_calls: NotRequired[bool]
-    r"""Whether to enable parallel function calling during tool use."""
-    modalities: NotRequired[
-        Nullable[List[RunAgentFallbackModelConfigurationModalities]]
-    ]
+class RunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class RunAgentFallbackModelConfigurationRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RunAgentFallbackModelConfigurationType = Literal["exact_match",]
+
+
+class RunAgentFallbackModelConfigurationCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: RunAgentFallbackModelConfigurationType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class RunAgentFallbackModelConfigurationCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: RunAgentFallbackModelConfigurationType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+RunAgentLoadBalancerAgentsType = Literal["weight_based",]
+
+
+class RunAgentLoadBalancerAgentsModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class RunAgentLoadBalancerAgentsModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class RunAgentLoadBalancerAgents1TypedDict(TypedDict):
+    type: RunAgentLoadBalancerAgentsType
+    models: List[RunAgentLoadBalancerAgentsModelsTypedDict]
+
+
+class RunAgentLoadBalancerAgents1(BaseModel):
+    type: RunAgentLoadBalancerAgentsType
+
+    models: List[RunAgentLoadBalancerAgentsModels]
+
+
+RunAgentFallbackModelConfigurationLoadBalancerTypedDict = (
+    RunAgentLoadBalancerAgents1TypedDict
+)
+r"""Load balancer configuration for the request."""
+
+
+RunAgentFallbackModelConfigurationLoadBalancer = RunAgentLoadBalancerAgents1
+r"""Load balancer configuration for the request."""
+
+
+class RunAgentFallbackModelConfigurationTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class RunAgentFallbackModelConfigurationTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class RunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
+    r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
+
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+    audio: NotRequired[Nullable[RunAgentFallbackModelConfigurationAudioTypedDict]]
+    r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+    frequency_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+    max_tokens: NotRequired[Nullable[int]]
+    r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+    This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+    """
+    max_completion_tokens: NotRequired[Nullable[int]]
+    r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
+    logprobs: NotRequired[Nullable[bool]]
+    r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+    top_logprobs: NotRequired[Nullable[int]]
+    r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+    n: NotRequired[Nullable[int]]
+    r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+    presence_penalty: NotRequired[Nullable[float]]
+    r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+    response_format: NotRequired[
+        RunAgentFallbackModelConfigurationResponseFormatTypedDict
+    ]
+    r"""An object specifying the format that the model must output"""
+    reasoning_effort: NotRequired[RunAgentFallbackModelConfigurationReasoningEffort]
+    r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+    - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+    Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+    """
+    verbosity: NotRequired[str]
+    r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+    seed: NotRequired[Nullable[float]]
+    r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+    stop: NotRequired[Nullable[RunAgentFallbackModelConfigurationStopTypedDict]]
+    r"""Up to 4 sequences where the API will stop generating further tokens."""
+    stream_options: NotRequired[
+        Nullable[RunAgentFallbackModelConfigurationStreamOptionsTypedDict]
+    ]
+    r"""Options for streaming response. Only set this when you set stream: true."""
+    thinking: NotRequired[RunAgentFallbackModelConfigurationThinkingTypedDict]
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+    top_p: NotRequired[Nullable[float]]
+    r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+    top_k: NotRequired[Nullable[float]]
+    r"""Limits the model to consider only the top k most likely tokens at each step."""
+    tool_choice: NotRequired[RunAgentFallbackModelConfigurationToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model."""
+    parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use."""
+    modalities: NotRequired[
+        Nullable[List[RunAgentFallbackModelConfigurationModalities]]
+    ]
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[RunAgentFallbackModelConfigurationGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[RunAgentFallbackModelConfigurationFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[RunAgentFallbackModelConfigurationRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[RunAgentFallbackModelConfigurationCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[RunAgentFallbackModelConfigurationLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[RunAgentFallbackModelConfigurationTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
 class RunAgentFallbackModelConfigurationParameters(BaseModel):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
 
+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[RunAgentFallbackModelConfigurationAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
 
@@ -1071,77 +1547,97 @@ class RunAgentFallbackModelConfigurationParameters(BaseModel):
     guardrails: Optional[List[RunAgentFallbackModelConfigurationGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[RunAgentFallbackModelConfigurationFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[RunAgentFallbackModelConfigurationRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[RunAgentFallbackModelConfigurationCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[RunAgentFallbackModelConfigurationLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[RunAgentFallbackModelConfigurationTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "audio",
-            "frequency_penalty",
-            "max_tokens",
-            "max_completion_tokens",
-            "logprobs",
-            "top_logprobs",
-            "n",
-            "presence_penalty",
-            "response_format",
-            "reasoning_effort",
-            "verbosity",
-            "seed",
-            "stop",
-            "stream_options",
-            "thinking",
-            "temperature",
-            "top_p",
-            "top_k",
-            "tool_choice",
-            "parallel_tool_calls",
-            "modalities",
-            "guardrails",
-        ]
-        nullable_fields = [
-            "audio",
-            "frequency_penalty",
-            "max_tokens",
-            "max_completion_tokens",
-            "logprobs",
-            "top_logprobs",
-            "n",
-            "presence_penalty",
-            "seed",
-            "stop",
-            "stream_options",
-            "temperature",
-            "top_p",
-            "top_k",
-            "modalities",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "name",
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
-class RunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
+class RunAgentFallbackModelConfigurationAgentsRetryTypedDict(TypedDict):
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
 
     count: NotRequired[float]
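Note: the weight-based load balancer introduced for these configurations distributes traffic across models in proportion to their weights (schema default 0.5). Selection happens server-side in the orq.ai gateway; a client-side sketch of the semantics only:

    import random

    def pick_model(load_balancer: dict) -> str:
        # illustrative mirror of the weight_based schema, not gateway internals
        assert load_balancer["type"] == "weight_based"
        models = load_balancer["models"]
        weights = [m.get("weight", 0.5) for m in models]
        return random.choices([m["model"] for m in models], weights=weights, k=1)[0]

    lb = {
        "type": "weight_based",
        "models": [
            {"model": "openai/gpt-4o", "weight": 0.8},
            {"model": "anthropic/claude-haiku-4-5-20251001", "weight": 0.2},
        ],
    }
    print(pick_model(lb))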
@@ -1150,7 +1646,7 @@ class RunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
     r"""HTTP status codes that trigger retry logic"""
 
 
-class RunAgentFallbackModelConfigurationRetry(BaseModel):
+class RunAgentFallbackModelConfigurationAgentsRetry(BaseModel):
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
 
     count: Optional[float] = 3
@@ -1159,6 +1655,22 @@ class RunAgentFallbackModelConfigurationRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class RunAgentFallbackModelConfiguration2TypedDict(TypedDict):
     r"""Fallback model configuration with optional parameters and retry settings."""
@@ -1167,7 +1679,7 @@ class RunAgentFallbackModelConfiguration2TypedDict(TypedDict):
     r"""A fallback model ID string. Must support tool calling."""
     parameters: NotRequired[RunAgentFallbackModelConfigurationParametersTypedDict]
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
-    retry: NotRequired[RunAgentFallbackModelConfigurationRetryTypedDict]
+    retry: NotRequired[RunAgentFallbackModelConfigurationAgentsRetryTypedDict]
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
 
 
@@ -1180,9 +1692,25 @@ class RunAgentFallbackModelConfiguration2(BaseModel):
     parameters: Optional[RunAgentFallbackModelConfigurationParameters] = None
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
 
-    retry: Optional[RunAgentFallbackModelConfigurationRetry] = None
+    retry: Optional[RunAgentFallbackModelConfigurationAgentsRetry] = None
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters", "retry"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentFallbackModelConfigurationTypedDict = TypeAliasType(
     "RunAgentFallbackModelConfigurationTypedDict",
@@ -1265,6 +1793,22 @@ class RunAgentA2AMessage(BaseModel):
     message_id: Annotated[Optional[str], pydantic.Field(alias="messageId")] = None
     r"""Optional A2A message ID in ULID format"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["messageId"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class RunAgentIdentityTypedDict(TypedDict):
     r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
@@ -1304,6 +1848,22 @@ class RunAgentIdentity(BaseModel):
     tags: Optional[List[str]] = None
     r"""A list of tags associated with the contact"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 @deprecated(
     "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
@@ -1349,6 +1909,22 @@ class RunAgentContact(BaseModel):
1349
1909
  tags: Optional[List[str]] = None
1350
1910
  r"""A list of tags associated with the contact"""
1351
1911
 
1912
+ @model_serializer(mode="wrap")
1913
+ def serialize_model(self, handler):
1914
+ optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
1915
+ serialized = handler(self)
1916
+ m = {}
1917
+
1918
+ for n, f in type(self).model_fields.items():
1919
+ k = f.alias or n
1920
+ val = serialized.get(k)
1921
+
1922
+ if val != UNSET_SENTINEL:
1923
+ if val is not None or k not in optional_fields:
1924
+ m[k] = val
1925
+
1926
+ return m
1927
+
1352
1928
 
1353
1929
  class RunAgentThreadTypedDict(TypedDict):
1354
1930
  r"""Thread information to group related requests"""
@@ -1368,6 +1944,22 @@ class RunAgentThread(BaseModel):
1368
1944
  tags: Optional[List[str]] = None
1369
1945
  r"""Optional tags to differentiate or categorize threads"""
1370
1946
 
1947
+ @model_serializer(mode="wrap")
1948
+ def serialize_model(self, handler):
1949
+ optional_fields = set(["tags"])
1950
+ serialized = handler(self)
1951
+ m = {}
1952
+
1953
+ for n, f in type(self).model_fields.items():
1954
+ k = f.alias or n
1955
+ val = serialized.get(k)
1956
+
1957
+ if val != UNSET_SENTINEL:
1958
+ if val is not None or k not in optional_fields:
1959
+ m[k] = val
1960
+
1961
+ return m
1962
+
1371
1963
 
1372
1964
  class RunAgentMemoryTypedDict(TypedDict):
1373
1965
  r"""Memory configuration for the agent execution. Used to associate memory stores with specific entities like users or sessions."""
@@ -1407,8 +1999,24 @@ class RunAgentTeamOfAgents(BaseModel):
1407
1999
  role: Optional[str] = None
1408
2000
  r"""The role of the agent in this context. This is used to give extra information to the leader to help it decide which agent to hand off to."""
1409
2001
 
2002
+ @model_serializer(mode="wrap")
2003
+ def serialize_model(self, handler):
2004
+ optional_fields = set(["role"])
2005
+ serialized = handler(self)
2006
+ m = {}
1410
2007
 
1411
- RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type = Literal["mcp",]
2008
+ for n, f in type(self).model_fields.items():
2009
+ k = f.alias or n
2010
+ val = serialized.get(k)
2011
+
2012
+ if val != UNSET_SENTINEL:
2013
+ if val is not None or k not in optional_fields:
2014
+ m[k] = val
2015
+
2016
+ return m
2017
+
2018
+
2019
+ RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type = Literal["mcp",]
1412
2020
 
1413
2021
 
1414
2022
  class AgentToolInputRunHeadersTypedDict(TypedDict):
@@ -1421,29 +2029,61 @@ class AgentToolInputRunHeaders(BaseModel):
1421
2029
 
1422
2030
  encrypted: Optional[bool] = False
1423
2031
 
2032
+ @model_serializer(mode="wrap")
2033
+ def serialize_model(self, handler):
2034
+ optional_fields = set(["encrypted"])
2035
+ serialized = handler(self)
2036
+ m = {}
2037
+
2038
+ for n, f in type(self).model_fields.items():
2039
+ k = f.alias or n
2040
+ val = serialized.get(k)
2041
+
2042
+ if val != UNSET_SENTINEL:
2043
+ if val is not None or k not in optional_fields:
2044
+ m[k] = val
2045
+
2046
+ return m
2047
+
1424
2048
 
1425
- RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType = Literal[
2049
+ RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType = Literal[
1426
2050
  "object",
1427
2051
  ]
1428
2052
 
1429
2053
 
1430
- class SchemaTypedDict(TypedDict):
1431
- type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType
2054
+ class AgentToolInputRunSchemaTypedDict(TypedDict):
2055
+ type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType
1432
2056
  properties: NotRequired[Dict[str, Any]]
1433
2057
  required: NotRequired[List[str]]
1434
2058
 
1435
2059
 
1436
- class Schema(BaseModel):
1437
- type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType
2060
+ class AgentToolInputRunSchema(BaseModel):
2061
+ type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType
1438
2062
 
1439
2063
  properties: Optional[Dict[str, Any]] = None
1440
2064
 
1441
2065
  required: Optional[List[str]] = None
1442
2066
 
2067
+ @model_serializer(mode="wrap")
2068
+ def serialize_model(self, handler):
2069
+ optional_fields = set(["properties", "required"])
2070
+ serialized = handler(self)
2071
+ m = {}
2072
+
2073
+ for n, f in type(self).model_fields.items():
2074
+ k = f.alias or n
2075
+ val = serialized.get(k)
2076
+
2077
+ if val != UNSET_SENTINEL:
2078
+ if val is not None or k not in optional_fields:
2079
+ m[k] = val
2080
+
2081
+ return m
2082
+
1443
2083
 
1444
2084
  class RunAgentAgentToolInputRunToolsTypedDict(TypedDict):
1445
2085
  name: str
1446
- schema_: SchemaTypedDict
2086
+ schema_: AgentToolInputRunSchemaTypedDict
1447
2087
  id: NotRequired[str]
1448
2088
  description: NotRequired[str]
1449
2089
 
@@ -1451,12 +2091,28 @@ class RunAgentAgentToolInputRunToolsTypedDict(TypedDict):
1451
2091
  class RunAgentAgentToolInputRunTools(BaseModel):
1452
2092
  name: str
1453
2093
 
1454
- schema_: Annotated[Schema, pydantic.Field(alias="schema")]
2094
+ schema_: Annotated[AgentToolInputRunSchema, pydantic.Field(alias="schema")]
1455
2095
 
1456
- id: Optional[str] = "01KEXRJ7JQPSGV86505JAYS00Q"
2096
+ id: Optional[str] = "01KG2RZQ82YZ0379R2S5B1486Q"
1457
2097
 
1458
2098
  description: Optional[str] = None
1459
2099
 
2100
+ @model_serializer(mode="wrap")
2101
+ def serialize_model(self, handler):
2102
+ optional_fields = set(["id", "description"])
2103
+ serialized = handler(self)
2104
+ m = {}
2105
+
2106
+ for n, f in type(self).model_fields.items():
2107
+ k = f.alias or n
2108
+ val = serialized.get(k)
2109
+
2110
+ if val != UNSET_SENTINEL:
2111
+ if val is not None or k not in optional_fields:
2112
+ m[k] = val
2113
+
2114
+ return m
2115
+
1460
2116
 
1461
2117
  ConnectionType = Literal[
1462
2118
  "http",
@@ -1489,11 +2145,27 @@ class Mcp(BaseModel):
1489
2145
  headers: Optional[Dict[str, AgentToolInputRunHeaders]] = None
1490
2146
  r"""HTTP headers for MCP server requests with encryption support"""
1491
2147
 
2148
+ @model_serializer(mode="wrap")
2149
+ def serialize_model(self, handler):
2150
+ optional_fields = set(["headers"])
2151
+ serialized = handler(self)
2152
+ m = {}
2153
+
2154
+ for n, f in type(self).model_fields.items():
2155
+ k = f.alias or n
2156
+ val = serialized.get(k)
2157
+
2158
+ if val != UNSET_SENTINEL:
2159
+ if val is not None or k not in optional_fields:
2160
+ m[k] = val
2161
+
2162
+ return m
2163
+
1492
2164
 
1493
2165
  class MCPToolRunTypedDict(TypedDict):
1494
2166
  r"""MCP tool with inline definition for on-the-fly creation in run endpoint"""
1495
2167
 
1496
- type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
2168
+ type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type
1497
2169
  key: str
1498
2170
  r"""Unique key of the tool as it will be displayed in the UI"""
1499
2171
  description: str
@@ -1507,7 +2179,7 @@ class MCPToolRunTypedDict(TypedDict):
1507
2179
  class MCPToolRun(BaseModel):
1508
2180
  r"""MCP tool with inline definition for on-the-fly creation in run endpoint"""
1509
2181
 
1510
- type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
2182
+ type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type
1511
2183
 
1512
2184
  key: str
1513
2185
  r"""Unique key of the tool as it will be displayed in the UI"""
@@ -1523,6 +2195,155 @@ class MCPToolRun(BaseModel):
1523
2195
 
1524
2196
  requires_approval: Optional[bool] = False
1525
2197
 
2198
+ @model_serializer(mode="wrap")
2199
+ def serialize_model(self, handler):
2200
+ optional_fields = set(["_id", "display_name", "requires_approval"])
2201
+ serialized = handler(self)
2202
+ m = {}
2203
+
2204
+ for n, f in type(self).model_fields.items():
2205
+ k = f.alias or n
2206
+ val = serialized.get(k)
2207
+
2208
+ if val != UNSET_SENTINEL:
2209
+ if val is not None or k not in optional_fields:
2210
+ m[k] = val
2211
+
2212
+ return m
2213
+
2214
+
2215
+ RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type = Literal[
2216
+ "json_schema",
2217
+ ]
2218
+
2219
+
2220
+ class SchemaTypedDict(TypedDict):
2221
+ r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
2222
+
2223
+ type: str
2224
+ r"""The JSON Schema type"""
2225
+ properties: Dict[str, Any]
2226
+ r"""The properties of the JSON Schema object"""
2227
+ required: List[str]
2228
+ r"""Array of required property names"""
2229
+
2230
+
2231
+ class Schema(BaseModel):
2232
+ r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
2233
+
2234
+ model_config = ConfigDict(
2235
+ populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
2236
+ )
2237
+ __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
2238
+
2239
+ type: str
2240
+ r"""The JSON Schema type"""
2241
+
2242
+ properties: Dict[str, Any]
2243
+ r"""The properties of the JSON Schema object"""
2244
+
2245
+ required: List[str]
2246
+ r"""Array of required property names"""
2247
+
2248
+ @property
2249
+ def additional_properties(self):
2250
+ return self.__pydantic_extra__
2251
+
2252
+ @additional_properties.setter
2253
+ def additional_properties(self, value):
2254
+ self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride]
2255
+
2256
+
2257
+ class AgentToolInputRunJSONSchemaTypedDict(TypedDict):
2258
+ name: str
2259
+ r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
2260
+ description: str
2261
+ r"""A description of what the response format is for. This will be shown to the user."""
2262
+ schema_: SchemaTypedDict
2263
+ r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
2264
+ strict: NotRequired[bool]
2265
+ r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. Only compatible with `OpenAI` models."""
2266
+
2267
+
2268
+ class AgentToolInputRunJSONSchema(BaseModel):
2269
+ name: str
2270
+ r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
2271
+
2272
+ description: str
2273
+ r"""A description of what the response format is for. This will be shown to the user."""
2274
+
2275
+ schema_: Annotated[Schema, pydantic.Field(alias="schema")]
2276
+ r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
2277
+
2278
+ strict: Optional[bool] = None
2279
+ r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. Only compatible with `OpenAI` models."""
2280
+
2281
+ @model_serializer(mode="wrap")
2282
+ def serialize_model(self, handler):
2283
+ optional_fields = set(["strict"])
2284
+ serialized = handler(self)
2285
+ m = {}
2286
+
2287
+ for n, f in type(self).model_fields.items():
2288
+ k = f.alias or n
2289
+ val = serialized.get(k)
2290
+
2291
+ if val != UNSET_SENTINEL:
2292
+ if val is not None or k not in optional_fields:
2293
+ m[k] = val
2294
+
2295
+ return m
2296
+
2297
+
2298
+ class JSONSchemaToolRunTypedDict(TypedDict):
2299
+ r"""JSON Schema tool with inline definition for on-the-fly creation in run endpoint"""
2300
+
2301
+ type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
2302
+ key: str
2303
+ r"""Unique key of the tool as it will be displayed in the UI"""
2304
+ description: str
2305
+ r"""A description of the tool, used by the model to choose when and how to call the tool. We do recommend using the `description` field as accurate as possible to give enough context to the model to make the right decision."""
2306
+ json_schema: AgentToolInputRunJSONSchemaTypedDict
2307
+ id: NotRequired[str]
2308
+ display_name: NotRequired[str]
2309
+ requires_approval: NotRequired[bool]
2310
+
2311
+
2312
+ class JSONSchemaToolRun(BaseModel):
2313
+ r"""JSON Schema tool with inline definition for on-the-fly creation in run endpoint"""
2314
+
2315
+ type: RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
2316
+
2317
+ key: str
2318
+ r"""Unique key of the tool as it will be displayed in the UI"""
2319
+
2320
+ description: str
2321
+ r"""A description of the tool, used by the model to choose when and how to call the tool. We do recommend using the `description` field as accurate as possible to give enough context to the model to make the right decision."""
2322
+
2323
+ json_schema: AgentToolInputRunJSONSchema
2324
+
2325
+ id: Annotated[Optional[str], pydantic.Field(alias="_id")] = None
2326
+
2327
+ display_name: Optional[str] = None
2328
+
2329
+ requires_approval: Optional[bool] = False
2330
+
2331
+ @model_serializer(mode="wrap")
2332
+ def serialize_model(self, handler):
2333
+ optional_fields = set(["_id", "display_name", "requires_approval"])
2334
+ serialized = handler(self)
2335
+ m = {}
2336
+
2337
+ for n, f in type(self).model_fields.items():
2338
+ k = f.alias or n
2339
+ val = serialized.get(k)
2340
+
2341
+ if val != UNSET_SENTINEL:
2342
+ if val is not None or k not in optional_fields:
2343
+ m[k] = val
2344
+
2345
+ return m
2346
+
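Note: the json_schema tool type added above completes the inline tool definitions, pairing a named response format with a JSON Schema body (the Schema model accepts extra keys via extra="allow"). A hypothetical inline payload for such a tool, built only from the fields shown in this diff (all values invented for illustration):

    json_schema_tool = {
        "type": "json_schema",
        "key": "extract_invoice",
        "description": "Extract structured invoice fields from the conversation.",
        "json_schema": {
            "name": "invoice",
            "description": "Structured invoice fields.",
            "schema": {
                "type": "object",
                "properties": {"total": {"type": "number"}},
                "required": ["total"],
            },
            "strict": True,
        },
    }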
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools14Type = Literal[
     "function",
@@ -1596,6 +2417,22 @@ class AgentToolInputRunFunction(BaseModel):
     parameters: Optional[RunAgentAgentToolInputRunParameters] = None
     r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict", "parameters"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class FunctionToolRunTypedDict(TypedDict):
     r"""Function tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1628,6 +2465,24 @@ class FunctionToolRun(BaseModel):
 
     requires_approval: Optional[bool] = False
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            ["_id", "display_name", "description", "requires_approval"]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools13Type = Literal["code",]
 
@@ -1695,6 +2550,22 @@ class CodeTool(BaseModel):
     parameters: Optional[AgentToolInputRunParameters] = None
     r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class CodeToolRunTypedDict(TypedDict):
     r"""Code execution tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1729,6 +2600,22 @@ class CodeToolRun(BaseModel):
 
     requires_approval: Optional[bool] = False
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools12Type = Literal["http",]
 
@@ -1747,10 +2634,26 @@ class Headers2TypedDict(TypedDict):
     encrypted: NotRequired[bool]
 
 
-class Headers2(BaseModel):
-    value: str
+class Headers2(BaseModel):
+    value: str
+
+    encrypted: Optional[bool] = False
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["encrypted"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
-    encrypted: Optional[bool] = False
+        return m
 
 
 HeadersTypedDict = TypeAliasType("HeadersTypedDict", Union[Headers2TypedDict, str])
@@ -1787,6 +2690,22 @@ class Blueprint(BaseModel):
     body: Optional[Dict[str, Any]] = None
     r"""The body to send with the request."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["headers", "body"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools12HTTPType = Literal[
     "string",
@@ -1828,6 +2747,22 @@ class Arguments(BaseModel):
     default_value: Optional[DefaultValue] = None
     r"""The default value of the argument."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["send_to_model", "default_value"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class HTTPTypedDict(TypedDict):
     blueprint: BlueprintTypedDict
@@ -1843,6 +2778,22 @@ class HTTP(BaseModel):
     arguments: Optional[Dict[str, Arguments]] = None
     r"""The arguments to send with the request. The keys will be used to replace the placeholders in the `blueprint` field."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class HTTPToolRunTypedDict(TypedDict):
     r"""HTTP tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1877,6 +2828,22 @@ class HTTPToolRun(BaseModel):
 
     requires_approval: Optional[bool] = False
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools11Type = Literal[
     "current_date",
@@ -1899,6 +2866,22 @@ class AgentToolInputRunCurrentDateTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools10Type = Literal[
     "query_knowledge_base",
@@ -1921,6 +2904,22 @@ class AgentToolInputRunQueryKnowledgeBaseTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools9Type = Literal[
     "retrieve_knowledge_bases",
@@ -1943,6 +2942,22 @@ class AgentToolInputRunRetrieveKnowledgeBasesTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools8Type = Literal[
     "delete_memory_document",
@@ -1965,6 +2980,22 @@ class AgentToolInputRunDeleteMemoryDocumentTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsToolsType = Literal[
     "retrieve_memory_stores",
@@ -1987,6 +3018,22 @@ class AgentToolInputRunRetrieveMemoryStoresTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodySettingsType = Literal[
     "write_memory_store",
@@ -2009,6 +3056,22 @@ class AgentToolInputRunWriteMemoryStoreTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestRequestBodyType = Literal["query_memory_store",]
 
@@ -2029,6 +3092,22 @@ class AgentToolInputRunQueryMemoryStoreTool(BaseModel):
     requires_approval: Optional[bool] = None
    r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsRequestType = Literal["retrieve_agents",]
 
@@ -2049,6 +3128,22 @@ class AgentToolInputRunRetrieveAgentsTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunAgentsType = Literal["call_sub_agent",]
 
@@ -2069,6 +3164,22 @@ class AgentToolInputRunCallSubAgentTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentToolInputRunType = Literal["web_scraper",]
 
@@ -2089,6 +3200,22 @@ class AgentToolInputRunWebScraperTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 AgentToolInputRunType = Literal["google_search",]
 
@@ -2109,6 +3236,22 @@ class AgentToolInputRunGoogleSearchTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 AgentToolInputRunTypedDict = TypeAliasType(
     "AgentToolInputRunTypedDict",
@@ -2127,10 +3270,11 @@ AgentToolInputRunTypedDict = TypeAliasType(
         HTTPToolRunTypedDict,
         CodeToolRunTypedDict,
         FunctionToolRunTypedDict,
+        JSONSchemaToolRunTypedDict,
         MCPToolRunTypedDict,
     ],
 )
-r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, MCP) support full inline definitions for on-the-fly creation."""
+r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, JSON Schema, MCP) support full inline definitions for on-the-fly creation."""
 
 
 AgentToolInputRun = Annotated[
@@ -2155,11 +3299,12 @@ AgentToolInputRun = Annotated[
         Annotated[HTTPToolRun, Tag("http")],
         Annotated[CodeToolRun, Tag("code")],
         Annotated[FunctionToolRun, Tag("function")],
+        Annotated[JSONSchemaToolRun, Tag("json_schema")],
         Annotated[MCPToolRun, Tag("mcp")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
-r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, MCP) support full inline definitions for on-the-fly creation."""
+r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, JSON Schema, MCP) support full inline definitions for on-the-fly creation."""
 
 
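Note: AgentToolInputRun resolves each entry by its type tag, which is how the new json_schema member slots in beside the existing tools. A simplified sketch of the routing idea (pydantic v2; these class names are illustrative, and a plain string discriminator stands in for the SDK's callable get_discriminator with Tags):

    from typing import Literal, Union
    from pydantic import BaseModel, Field, TypeAdapter
    from typing_extensions import Annotated

    class GoogleSearchTool(BaseModel):
        type: Literal["google_search"]

    class JSONSchemaTool(BaseModel):
        type: Literal["json_schema"]
        key: str

    Tool = Annotated[Union[GoogleSearchTool, JSONSchemaTool], Field(discriminator="type")]

    tool = TypeAdapter(Tool).validate_python({"type": "json_schema", "key": "extract"})
    print(type(tool).__name__)  # JSONSchemaTool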
 RunAgentToolApprovalRequired = Literal[
@@ -2196,6 +3341,22 @@ class RunAgentEvaluators(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 RunAgentAgentsExecuteOn = Literal[
     "input",
@@ -2223,6 +3384,22 @@ class RunAgentGuardrails(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class RunAgentSettingsTypedDict(TypedDict):
     tools: NotRequired[List[AgentToolInputRunTypedDict]]
@@ -2258,6 +3435,31 @@ class RunAgentSettings(BaseModel):
     guardrails: Optional[List[RunAgentGuardrails]] = None
     r"""Configuration for a guardrail applied to the agent"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "tools",
+                "tool_approval_required",
+                "max_iterations",
+                "max_execution_time",
+                "evaluators",
+                "guardrails",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
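Note: since every RunAgentSettings field is optional and the serializer drops unset members, a settings payload carries only what the caller actually sets. A hypothetical example using fields visible in this diff (values invented for illustration):

    settings = {
        "tools": [{"type": "google_search", "requires_approval": False}],
        "max_iterations": 5,
    }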
 class RunAgentRequestBodyTypedDict(TypedDict):
     key: str
@@ -2376,6 +3578,38 @@ class RunAgentRequestBody(BaseModel):
     metadata: Optional[Dict[str, Any]] = None
     r"""Optional metadata for the agent run as key-value pairs that will be included in traces"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "task_id",
+                "fallback_models",
+                "variables",
+                "identity",
+                "contact",
+                "thread",
+                "memory",
+                "description",
+                "system_prompt",
+                "memory_stores",
+                "knowledge_bases",
+                "team_of_agents",
+                "metadata",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
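Note: of the RunAgentRequestBody fields shown in this diff, only key is required; every member of the optional_fields list above is omitted from the wire format when unset. A hypothetical minimal run payload (values invented for illustration; fields outside this diff may also apply):

    body = {
        "key": "support-agent",
        "thread": {"tags": ["billing"]},
        "metadata": {"source": "docs-example"},
    }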
 RunAgentKind = Literal["task",]
 r"""A2A entity type identifier"""
@@ -2478,6 +3712,22 @@ class RunAgentTaskStatus(BaseModel):
     message: Optional[RunAgentTaskStatusMessage] = None
     r"""Optional A2A message providing additional context about the current status"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["timestamp", "message"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class RunAgentA2ATaskResponseTypedDict(TypedDict):
     r"""Response format following the Agent-to-Agent (A2A) protocol. Returned when starting or continuing an agent task execution."""
@@ -2511,3 +3761,19 @@ class RunAgentA2ATaskResponse(BaseModel):
 
     metadata: Optional[Dict[str, Any]] = None
     r"""Task metadata containing workspace_id and trace_id for feedback and tracking"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["metadata"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m