orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
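The recurring change across the model files below is a regenerated `serialize_model` hook: optional fields that resolve to None are now dropped from the serialized payload instead of being emitted. A minimal standalone sketch of that pattern, assuming Pydantic v2; `UNSET_SENTINEL` and `RetryConfig` are illustrative stand-ins here, not imports from the package:

# Sketch of the serializer pattern added throughout this diff (Pydantic v2).
# UNSET_SENTINEL stands in for the SDK's own sentinel value; RetryConfig is
# an illustrative model, not one of the generated classes.
from typing import List, Optional

from pydantic import BaseModel, model_serializer

UNSET_SENTINEL = object()


class RetryConfig(BaseModel):
    count: Optional[float] = 3
    on_codes: Optional[List[float]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["count", "on_codes"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                # Drop optional fields that resolved to None so the wire
                # payload only carries values that are actually set.
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


print(RetryConfig().model_dump())         # {'count': 3.0}
print(RetryConfig(count=2).model_dump())  # {'count': 2.0}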
@@ -167,6 +167,22 @@ class StreamRunAgentResponseFormatAgentsJSONSchema(BaseModel):
167
167
  strict: Optional[bool] = False
168
168
  r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
169
169
 
170
+ @model_serializer(mode="wrap")
171
+ def serialize_model(self, handler):
172
+ optional_fields = set(["description", "schema", "strict"])
173
+ serialized = handler(self)
174
+ m = {}
175
+
176
+ for n, f in type(self).model_fields.items():
177
+ k = f.alias or n
178
+ val = serialized.get(k)
179
+
180
+ if val != UNSET_SENTINEL:
181
+ if val is not None or k not in optional_fields:
182
+ m[k] = val
183
+
184
+ return m
185
+
170
186
 
171
187
  class StreamRunAgentResponseFormatJSONSchemaTypedDict(TypedDict):
172
188
  r"""
@@ -297,6 +313,22 @@ class StreamRunAgentModelConfigurationStreamOptions(BaseModel):
297
313
  include_usage: Optional[bool] = None
298
314
  r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
299
315
 
316
+ @model_serializer(mode="wrap")
317
+ def serialize_model(self, handler):
318
+ optional_fields = set(["include_usage"])
319
+ serialized = handler(self)
320
+ m = {}
321
+
322
+ for n, f in type(self).model_fields.items():
323
+ k = f.alias or n
324
+ val = serialized.get(k)
325
+
326
+ if val != UNSET_SENTINEL:
327
+ if val is not None or k not in optional_fields:
328
+ m[k] = val
329
+
330
+ return m
331
+
300
332
 
301
333
  StreamRunAgentModelConfigurationThinkingTypedDict = TypeAliasType(
302
334
  "StreamRunAgentModelConfigurationThinkingTypedDict",
@@ -339,6 +371,22 @@ class StreamRunAgentToolChoice2(BaseModel):
339
371
  type: Optional[StreamRunAgentToolChoiceType] = None
340
372
  r"""The type of the tool. Currently, only function is supported."""
341
373
 
374
+ @model_serializer(mode="wrap")
375
+ def serialize_model(self, handler):
376
+ optional_fields = set(["type"])
377
+ serialized = handler(self)
378
+ m = {}
379
+
380
+ for n, f in type(self).model_fields.items():
381
+ k = f.alias or n
382
+ val = serialized.get(k)
383
+
384
+ if val != UNSET_SENTINEL:
385
+ if val is not None or k not in optional_fields:
386
+ m[k] = val
387
+
388
+ return m
389
+
342
390
 
343
391
  StreamRunAgentToolChoice1 = Literal[
344
392
  "none",
@@ -405,6 +453,156 @@ class StreamRunAgentModelConfigurationGuardrails(BaseModel):
405
453
  r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
406
454
 
407
455
 
456
+ class StreamRunAgentModelConfigurationFallbacksTypedDict(TypedDict):
457
+ model: str
458
+ r"""Fallback model identifier"""
459
+
460
+
461
+ class StreamRunAgentModelConfigurationFallbacks(BaseModel):
462
+ model: str
463
+ r"""Fallback model identifier"""
464
+
465
+
466
+ class StreamRunAgentModelConfigurationRetryTypedDict(TypedDict):
467
+ r"""Retry configuration for the request"""
468
+
469
+ count: NotRequired[float]
470
+ r"""Number of retry attempts (1-5)"""
471
+ on_codes: NotRequired[List[float]]
472
+ r"""HTTP status codes that trigger retry logic"""
473
+
474
+
475
+ class StreamRunAgentModelConfigurationRetry(BaseModel):
476
+ r"""Retry configuration for the request"""
477
+
478
+ count: Optional[float] = 3
479
+ r"""Number of retry attempts (1-5)"""
480
+
481
+ on_codes: Optional[List[float]] = None
482
+ r"""HTTP status codes that trigger retry logic"""
483
+
484
+ @model_serializer(mode="wrap")
485
+ def serialize_model(self, handler):
486
+ optional_fields = set(["count", "on_codes"])
487
+ serialized = handler(self)
488
+ m = {}
489
+
490
+ for n, f in type(self).model_fields.items():
491
+ k = f.alias or n
492
+ val = serialized.get(k)
493
+
494
+ if val != UNSET_SENTINEL:
495
+ if val is not None or k not in optional_fields:
496
+ m[k] = val
497
+
498
+ return m
499
+
500
+
501
+ StreamRunAgentModelConfigurationType = Literal["exact_match",]
502
+
503
+
504
+ class StreamRunAgentModelConfigurationCacheTypedDict(TypedDict):
505
+ r"""Cache configuration for the request."""
506
+
507
+ type: StreamRunAgentModelConfigurationType
508
+ ttl: NotRequired[float]
509
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
510
+
511
+
512
+ class StreamRunAgentModelConfigurationCache(BaseModel):
513
+ r"""Cache configuration for the request."""
514
+
515
+ type: StreamRunAgentModelConfigurationType
516
+
517
+ ttl: Optional[float] = 1800
518
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
519
+
520
+ @model_serializer(mode="wrap")
521
+ def serialize_model(self, handler):
522
+ optional_fields = set(["ttl"])
523
+ serialized = handler(self)
524
+ m = {}
525
+
526
+ for n, f in type(self).model_fields.items():
527
+ k = f.alias or n
528
+ val = serialized.get(k)
529
+
530
+ if val != UNSET_SENTINEL:
531
+ if val is not None or k not in optional_fields:
532
+ m[k] = val
533
+
534
+ return m
535
+
536
+
537
+ StreamRunAgentLoadBalancerType = Literal["weight_based",]
538
+
539
+
540
+ class StreamRunAgentLoadBalancerModelsTypedDict(TypedDict):
541
+ model: str
542
+ r"""Model identifier for load balancing"""
543
+ weight: NotRequired[float]
544
+ r"""Weight assigned to this model for load balancing"""
545
+
546
+
547
+ class StreamRunAgentLoadBalancerModels(BaseModel):
548
+ model: str
549
+ r"""Model identifier for load balancing"""
550
+
551
+ weight: Optional[float] = 0.5
552
+ r"""Weight assigned to this model for load balancing"""
553
+
554
+ @model_serializer(mode="wrap")
555
+ def serialize_model(self, handler):
556
+ optional_fields = set(["weight"])
557
+ serialized = handler(self)
558
+ m = {}
559
+
560
+ for n, f in type(self).model_fields.items():
561
+ k = f.alias or n
562
+ val = serialized.get(k)
563
+
564
+ if val != UNSET_SENTINEL:
565
+ if val is not None or k not in optional_fields:
566
+ m[k] = val
567
+
568
+ return m
569
+
570
+
571
+ class StreamRunAgentLoadBalancer1TypedDict(TypedDict):
572
+ type: StreamRunAgentLoadBalancerType
573
+ models: List[StreamRunAgentLoadBalancerModelsTypedDict]
574
+
575
+
576
+ class StreamRunAgentLoadBalancer1(BaseModel):
577
+ type: StreamRunAgentLoadBalancerType
578
+
579
+ models: List[StreamRunAgentLoadBalancerModels]
580
+
581
+
582
+ StreamRunAgentModelConfigurationLoadBalancerTypedDict = (
583
+ StreamRunAgentLoadBalancer1TypedDict
584
+ )
585
+ r"""Load balancer configuration for the request."""
586
+
587
+
588
+ StreamRunAgentModelConfigurationLoadBalancer = StreamRunAgentLoadBalancer1
589
+ r"""Load balancer configuration for the request."""
590
+
591
+
592
+ class StreamRunAgentModelConfigurationTimeoutTypedDict(TypedDict):
593
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
594
+
595
+ call_timeout: float
596
+ r"""Timeout value in milliseconds"""
597
+
598
+
599
+ class StreamRunAgentModelConfigurationTimeout(BaseModel):
600
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
601
+
602
+ call_timeout: float
603
+ r"""Timeout value in milliseconds"""
604
+
605
+
408
606
  class StreamRunAgentModelConfigurationParametersTypedDict(TypedDict):
409
607
  r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
410
608
 
@@ -466,6 +664,16 @@ class StreamRunAgentModelConfigurationParametersTypedDict(TypedDict):
466
664
  r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
467
665
  guardrails: NotRequired[List[StreamRunAgentModelConfigurationGuardrailsTypedDict]]
468
666
  r"""A list of guardrails to apply to the request."""
667
+ fallbacks: NotRequired[List[StreamRunAgentModelConfigurationFallbacksTypedDict]]
668
+ r"""Array of fallback models to use if primary model fails"""
669
+ retry: NotRequired[StreamRunAgentModelConfigurationRetryTypedDict]
670
+ r"""Retry configuration for the request"""
671
+ cache: NotRequired[StreamRunAgentModelConfigurationCacheTypedDict]
672
+ r"""Cache configuration for the request."""
673
+ load_balancer: NotRequired[StreamRunAgentModelConfigurationLoadBalancerTypedDict]
674
+ r"""Load balancer configuration for the request."""
675
+ timeout: NotRequired[StreamRunAgentModelConfigurationTimeoutTypedDict]
676
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
469
677
 
470
678
 
471
679
  class StreamRunAgentModelConfigurationParameters(BaseModel):
@@ -551,77 +759,96 @@ class StreamRunAgentModelConfigurationParameters(BaseModel):
551
759
  guardrails: Optional[List[StreamRunAgentModelConfigurationGuardrails]] = None
552
760
  r"""A list of guardrails to apply to the request."""
553
761
 
762
+ fallbacks: Optional[List[StreamRunAgentModelConfigurationFallbacks]] = None
763
+ r"""Array of fallback models to use if primary model fails"""
764
+
765
+ retry: Optional[StreamRunAgentModelConfigurationRetry] = None
766
+ r"""Retry configuration for the request"""
767
+
768
+ cache: Optional[StreamRunAgentModelConfigurationCache] = None
769
+ r"""Cache configuration for the request."""
770
+
771
+ load_balancer: Optional[StreamRunAgentModelConfigurationLoadBalancer] = None
772
+ r"""Load balancer configuration for the request."""
773
+
774
+ timeout: Optional[StreamRunAgentModelConfigurationTimeout] = None
775
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
776
+
554
777
  @model_serializer(mode="wrap")
555
778
  def serialize_model(self, handler):
556
- optional_fields = [
557
- "audio",
558
- "frequency_penalty",
559
- "max_tokens",
560
- "max_completion_tokens",
561
- "logprobs",
562
- "top_logprobs",
563
- "n",
564
- "presence_penalty",
565
- "response_format",
566
- "reasoning_effort",
567
- "verbosity",
568
- "seed",
569
- "stop",
570
- "stream_options",
571
- "thinking",
572
- "temperature",
573
- "top_p",
574
- "top_k",
575
- "tool_choice",
576
- "parallel_tool_calls",
577
- "modalities",
578
- "guardrails",
579
- ]
580
- nullable_fields = [
581
- "audio",
582
- "frequency_penalty",
583
- "max_tokens",
584
- "max_completion_tokens",
585
- "logprobs",
586
- "top_logprobs",
587
- "n",
588
- "presence_penalty",
589
- "seed",
590
- "stop",
591
- "stream_options",
592
- "temperature",
593
- "top_p",
594
- "top_k",
595
- "modalities",
596
- ]
597
- null_default_fields = []
598
-
779
+ optional_fields = set(
780
+ [
781
+ "audio",
782
+ "frequency_penalty",
783
+ "max_tokens",
784
+ "max_completion_tokens",
785
+ "logprobs",
786
+ "top_logprobs",
787
+ "n",
788
+ "presence_penalty",
789
+ "response_format",
790
+ "reasoning_effort",
791
+ "verbosity",
792
+ "seed",
793
+ "stop",
794
+ "stream_options",
795
+ "thinking",
796
+ "temperature",
797
+ "top_p",
798
+ "top_k",
799
+ "tool_choice",
800
+ "parallel_tool_calls",
801
+ "modalities",
802
+ "guardrails",
803
+ "fallbacks",
804
+ "retry",
805
+ "cache",
806
+ "load_balancer",
807
+ "timeout",
808
+ ]
809
+ )
810
+ nullable_fields = set(
811
+ [
812
+ "audio",
813
+ "frequency_penalty",
814
+ "max_tokens",
815
+ "max_completion_tokens",
816
+ "logprobs",
817
+ "top_logprobs",
818
+ "n",
819
+ "presence_penalty",
820
+ "seed",
821
+ "stop",
822
+ "stream_options",
823
+ "temperature",
824
+ "top_p",
825
+ "top_k",
826
+ "modalities",
827
+ ]
828
+ )
599
829
  serialized = handler(self)
600
-
601
830
  m = {}
602
831
 
603
832
  for n, f in type(self).model_fields.items():
604
833
  k = f.alias or n
605
834
  val = serialized.get(k)
606
- serialized.pop(k, None)
607
-
608
- optional_nullable = k in optional_fields and k in nullable_fields
609
- is_set = (
610
- self.__pydantic_fields_set__.intersection({n})
611
- or k in null_default_fields
612
- ) # pylint: disable=no-member
613
-
614
- if val is not None and val != UNSET_SENTINEL:
615
- m[k] = val
616
- elif val != UNSET_SENTINEL and (
617
- not k in optional_fields or (optional_nullable and is_set)
618
- ):
619
- m[k] = val
835
+ is_nullable_and_explicitly_set = (
836
+ k in nullable_fields
837
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
838
+ )
839
+
840
+ if val != UNSET_SENTINEL:
841
+ if (
842
+ val is not None
843
+ or k not in optional_fields
844
+ or is_nullable_and_explicitly_set
845
+ ):
846
+ m[k] = val
620
847
 
621
848
  return m
622
849
 
623
850
 
624
- class StreamRunAgentModelConfigurationRetryTypedDict(TypedDict):
851
+ class StreamRunAgentModelConfigurationAgentsRetryTypedDict(TypedDict):
625
852
  r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
626
853
 
627
854
  count: NotRequired[float]
@@ -630,7 +857,7 @@ class StreamRunAgentModelConfigurationRetryTypedDict(TypedDict):
630
857
  r"""HTTP status codes that trigger retry logic"""
631
858
 
632
859
 
633
- class StreamRunAgentModelConfigurationRetry(BaseModel):
860
+ class StreamRunAgentModelConfigurationAgentsRetry(BaseModel):
634
861
  r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
635
862
 
636
863
  count: Optional[float] = 3
@@ -639,6 +866,22 @@ class StreamRunAgentModelConfigurationRetry(BaseModel):
639
866
  on_codes: Optional[List[float]] = None
640
867
  r"""HTTP status codes that trigger retry logic"""
641
868
 
869
+ @model_serializer(mode="wrap")
870
+ def serialize_model(self, handler):
871
+ optional_fields = set(["count", "on_codes"])
872
+ serialized = handler(self)
873
+ m = {}
874
+
875
+ for n, f in type(self).model_fields.items():
876
+ k = f.alias or n
877
+ val = serialized.get(k)
878
+
879
+ if val != UNSET_SENTINEL:
880
+ if val is not None or k not in optional_fields:
881
+ m[k] = val
882
+
883
+ return m
884
+
642
885
 
643
886
  class StreamRunAgentModelConfiguration2TypedDict(TypedDict):
644
887
  r"""
@@ -650,7 +893,7 @@ class StreamRunAgentModelConfiguration2TypedDict(TypedDict):
650
893
  r"""A model ID string (e.g., `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`). Only models that support tool calling can be used with agents."""
651
894
  parameters: NotRequired[StreamRunAgentModelConfigurationParametersTypedDict]
652
895
  r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
653
- retry: NotRequired[StreamRunAgentModelConfigurationRetryTypedDict]
896
+ retry: NotRequired[StreamRunAgentModelConfigurationAgentsRetryTypedDict]
654
897
  r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
655
898
 
656
899
 
@@ -666,9 +909,25 @@ class StreamRunAgentModelConfiguration2(BaseModel):
666
909
  parameters: Optional[StreamRunAgentModelConfigurationParameters] = None
667
910
  r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""
668
911
 
669
- retry: Optional[StreamRunAgentModelConfigurationRetry] = None
912
+ retry: Optional[StreamRunAgentModelConfigurationAgentsRetry] = None
670
913
  r"""Retry configuration for model requests. Retries are triggered for specific HTTP status codes (e.g., 500, 429, 502, 503, 504). Supports configurable retry count (1-5) and custom status codes."""
671
914
 
915
+ @model_serializer(mode="wrap")
916
+ def serialize_model(self, handler):
917
+ optional_fields = set(["parameters", "retry"])
918
+ serialized = handler(self)
919
+ m = {}
920
+
921
+ for n, f in type(self).model_fields.items():
922
+ k = f.alias or n
923
+ val = serialized.get(k)
924
+
925
+ if val != UNSET_SENTINEL:
926
+ if val is not None or k not in optional_fields:
927
+ m[k] = val
928
+
929
+ return m
930
+
672
931
 
673
932
  StreamRunAgentModelConfigurationTypedDict = TypeAliasType(
674
933
  "StreamRunAgentModelConfigurationTypedDict",
@@ -756,6 +1015,22 @@ class StreamRunAgentResponseFormatAgentsRequestRequestBodyJSONSchema(BaseModel):
756
1015
  strict: Optional[bool] = False
757
1016
  r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
758
1017
 
1018
+ @model_serializer(mode="wrap")
1019
+ def serialize_model(self, handler):
1020
+ optional_fields = set(["description", "schema", "strict"])
1021
+ serialized = handler(self)
1022
+ m = {}
1023
+
1024
+ for n, f in type(self).model_fields.items():
1025
+ k = f.alias or n
1026
+ val = serialized.get(k)
1027
+
1028
+ if val != UNSET_SENTINEL:
1029
+ if val is not None or k not in optional_fields:
1030
+ m[k] = val
1031
+
1032
+ return m
1033
+
759
1034
 
760
1035
  class StreamRunAgentResponseFormatAgentsRequestJSONSchemaTypedDict(TypedDict):
761
1036
  r"""
@@ -890,6 +1165,22 @@ class StreamRunAgentFallbackModelConfigurationStreamOptions(BaseModel):
890
1165
  include_usage: Optional[bool] = None
891
1166
  r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
892
1167
 
1168
+ @model_serializer(mode="wrap")
1169
+ def serialize_model(self, handler):
1170
+ optional_fields = set(["include_usage"])
1171
+ serialized = handler(self)
1172
+ m = {}
1173
+
1174
+ for n, f in type(self).model_fields.items():
1175
+ k = f.alias or n
1176
+ val = serialized.get(k)
1177
+
1178
+ if val != UNSET_SENTINEL:
1179
+ if val is not None or k not in optional_fields:
1180
+ m[k] = val
1181
+
1182
+ return m
1183
+
893
1184
 
894
1185
  StreamRunAgentFallbackModelConfigurationThinkingTypedDict = TypeAliasType(
895
1186
  "StreamRunAgentFallbackModelConfigurationThinkingTypedDict",
@@ -932,6 +1223,22 @@ class StreamRunAgentToolChoiceAgents2(BaseModel):
932
1223
  type: Optional[StreamRunAgentToolChoiceAgentsType] = None
933
1224
  r"""The type of the tool. Currently, only function is supported."""
934
1225
 
1226
+ @model_serializer(mode="wrap")
1227
+ def serialize_model(self, handler):
1228
+ optional_fields = set(["type"])
1229
+ serialized = handler(self)
1230
+ m = {}
1231
+
1232
+ for n, f in type(self).model_fields.items():
1233
+ k = f.alias or n
1234
+ val = serialized.get(k)
1235
+
1236
+ if val != UNSET_SENTINEL:
1237
+ if val is not None or k not in optional_fields:
1238
+ m[k] = val
1239
+
1240
+ return m
1241
+
935
1242
 
936
1243
  StreamRunAgentToolChoiceAgents1 = Literal[
937
1244
  "none",
@@ -999,66 +1306,216 @@ class StreamRunAgentFallbackModelConfigurationGuardrails(BaseModel):
999
1306
  r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1000
1307
 
1001
1308
 
1002
- class StreamRunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
1003
- r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
1309
+ class StreamRunAgentFallbackModelConfigurationFallbacksTypedDict(TypedDict):
1310
+ model: str
1311
+ r"""Fallback model identifier"""
1004
1312
 
1005
- audio: NotRequired[Nullable[StreamRunAgentFallbackModelConfigurationAudioTypedDict]]
1006
- r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
1007
- frequency_penalty: NotRequired[Nullable[float]]
1008
- r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
1009
- max_tokens: NotRequired[Nullable[int]]
1010
- r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
1011
1313
 
1012
- This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
1013
- """
1014
- max_completion_tokens: NotRequired[Nullable[int]]
1015
- r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
1016
- logprobs: NotRequired[Nullable[bool]]
1017
- r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
1018
- top_logprobs: NotRequired[Nullable[int]]
1019
- r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
1020
- n: NotRequired[Nullable[int]]
1021
- r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
1022
- presence_penalty: NotRequired[Nullable[float]]
1023
- r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
1024
- response_format: NotRequired[
1025
- StreamRunAgentFallbackModelConfigurationResponseFormatTypedDict
1026
- ]
1027
- r"""An object specifying the format that the model must output"""
1028
- reasoning_effort: NotRequired[
1029
- StreamRunAgentFallbackModelConfigurationReasoningEffort
1030
- ]
1031
- r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
1314
+ class StreamRunAgentFallbackModelConfigurationFallbacks(BaseModel):
1315
+ model: str
1316
+ r"""Fallback model identifier"""
1032
1317
 
1033
- - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
1034
- - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
1035
- - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1036
- - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
1037
1318
 
1038
- Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
1039
- """
1040
- verbosity: NotRequired[str]
1041
- r"""Adjusts response verbosity. Lower levels yield shorter answers."""
1042
- seed: NotRequired[Nullable[float]]
1043
- r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
1044
- stop: NotRequired[Nullable[StreamRunAgentFallbackModelConfigurationStopTypedDict]]
1045
- r"""Up to 4 sequences where the API will stop generating further tokens."""
1046
- stream_options: NotRequired[
1047
- Nullable[StreamRunAgentFallbackModelConfigurationStreamOptionsTypedDict]
1048
- ]
1049
- r"""Options for streaming response. Only set this when you set stream: true."""
1050
- thinking: NotRequired[StreamRunAgentFallbackModelConfigurationThinkingTypedDict]
1051
- temperature: NotRequired[Nullable[float]]
1052
- r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
1053
- top_p: NotRequired[Nullable[float]]
1054
- r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
1055
- top_k: NotRequired[Nullable[float]]
1056
- r"""Limits the model to consider only the top k most likely tokens at each step."""
1057
- tool_choice: NotRequired[
1058
- StreamRunAgentFallbackModelConfigurationToolChoiceTypedDict
1059
- ]
1060
- r"""Controls which (if any) tool is called by the model."""
1061
- parallel_tool_calls: NotRequired[bool]
1319
+ class StreamRunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
1320
+ r"""Retry configuration for the request"""
1321
+
1322
+ count: NotRequired[float]
1323
+ r"""Number of retry attempts (1-5)"""
1324
+ on_codes: NotRequired[List[float]]
1325
+ r"""HTTP status codes that trigger retry logic"""
1326
+
1327
+
1328
+ class StreamRunAgentFallbackModelConfigurationRetry(BaseModel):
1329
+ r"""Retry configuration for the request"""
1330
+
1331
+ count: Optional[float] = 3
1332
+ r"""Number of retry attempts (1-5)"""
1333
+
1334
+ on_codes: Optional[List[float]] = None
1335
+ r"""HTTP status codes that trigger retry logic"""
1336
+
1337
+ @model_serializer(mode="wrap")
1338
+ def serialize_model(self, handler):
1339
+ optional_fields = set(["count", "on_codes"])
1340
+ serialized = handler(self)
1341
+ m = {}
1342
+
1343
+ for n, f in type(self).model_fields.items():
1344
+ k = f.alias or n
1345
+ val = serialized.get(k)
1346
+
1347
+ if val != UNSET_SENTINEL:
1348
+ if val is not None or k not in optional_fields:
1349
+ m[k] = val
1350
+
1351
+ return m
1352
+
1353
+
1354
+ StreamRunAgentFallbackModelConfigurationType = Literal["exact_match",]
1355
+
1356
+
1357
+ class StreamRunAgentFallbackModelConfigurationCacheTypedDict(TypedDict):
1358
+ r"""Cache configuration for the request."""
1359
+
1360
+ type: StreamRunAgentFallbackModelConfigurationType
1361
+ ttl: NotRequired[float]
1362
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
1363
+
1364
+
1365
+ class StreamRunAgentFallbackModelConfigurationCache(BaseModel):
1366
+ r"""Cache configuration for the request."""
1367
+
1368
+ type: StreamRunAgentFallbackModelConfigurationType
1369
+
1370
+ ttl: Optional[float] = 1800
1371
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
1372
+
1373
+ @model_serializer(mode="wrap")
1374
+ def serialize_model(self, handler):
1375
+ optional_fields = set(["ttl"])
1376
+ serialized = handler(self)
1377
+ m = {}
1378
+
1379
+ for n, f in type(self).model_fields.items():
1380
+ k = f.alias or n
1381
+ val = serialized.get(k)
1382
+
1383
+ if val != UNSET_SENTINEL:
1384
+ if val is not None or k not in optional_fields:
1385
+ m[k] = val
1386
+
1387
+ return m
1388
+
1389
+
1390
+ StreamRunAgentLoadBalancerAgentsType = Literal["weight_based",]
1391
+
1392
+
1393
+ class StreamRunAgentLoadBalancerAgentsModelsTypedDict(TypedDict):
1394
+ model: str
1395
+ r"""Model identifier for load balancing"""
1396
+ weight: NotRequired[float]
1397
+ r"""Weight assigned to this model for load balancing"""
1398
+
1399
+
1400
+ class StreamRunAgentLoadBalancerAgentsModels(BaseModel):
1401
+ model: str
1402
+ r"""Model identifier for load balancing"""
1403
+
1404
+ weight: Optional[float] = 0.5
1405
+ r"""Weight assigned to this model for load balancing"""
1406
+
1407
+ @model_serializer(mode="wrap")
1408
+ def serialize_model(self, handler):
1409
+ optional_fields = set(["weight"])
1410
+ serialized = handler(self)
1411
+ m = {}
1412
+
1413
+ for n, f in type(self).model_fields.items():
1414
+ k = f.alias or n
1415
+ val = serialized.get(k)
1416
+
1417
+ if val != UNSET_SENTINEL:
1418
+ if val is not None or k not in optional_fields:
1419
+ m[k] = val
1420
+
1421
+ return m
1422
+
1423
+
1424
+ class StreamRunAgentLoadBalancerAgents1TypedDict(TypedDict):
1425
+ type: StreamRunAgentLoadBalancerAgentsType
1426
+ models: List[StreamRunAgentLoadBalancerAgentsModelsTypedDict]
1427
+
1428
+
1429
+ class StreamRunAgentLoadBalancerAgents1(BaseModel):
1430
+ type: StreamRunAgentLoadBalancerAgentsType
1431
+
1432
+ models: List[StreamRunAgentLoadBalancerAgentsModels]
1433
+
1434
+
1435
+ StreamRunAgentFallbackModelConfigurationLoadBalancerTypedDict = (
1436
+ StreamRunAgentLoadBalancerAgents1TypedDict
1437
+ )
1438
+ r"""Load balancer configuration for the request."""
1439
+
1440
+
1441
+ StreamRunAgentFallbackModelConfigurationLoadBalancer = StreamRunAgentLoadBalancerAgents1
1442
+ r"""Load balancer configuration for the request."""
1443
+
1444
+
1445
+ class StreamRunAgentFallbackModelConfigurationTimeoutTypedDict(TypedDict):
1446
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1447
+
1448
+ call_timeout: float
1449
+ r"""Timeout value in milliseconds"""
1450
+
1451
+
1452
+ class StreamRunAgentFallbackModelConfigurationTimeout(BaseModel):
1453
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1454
+
1455
+ call_timeout: float
1456
+ r"""Timeout value in milliseconds"""
1457
+
1458
+
1459
+ class StreamRunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
1460
+ r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
1461
+
1462
+ audio: NotRequired[Nullable[StreamRunAgentFallbackModelConfigurationAudioTypedDict]]
1463
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
1464
+ frequency_penalty: NotRequired[Nullable[float]]
1465
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
1466
+ max_tokens: NotRequired[Nullable[int]]
1467
+ r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
1468
+
1469
+ This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
1470
+ """
1471
+ max_completion_tokens: NotRequired[Nullable[int]]
1472
+ r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
1473
+ logprobs: NotRequired[Nullable[bool]]
1474
+ r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
1475
+ top_logprobs: NotRequired[Nullable[int]]
1476
+ r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
1477
+ n: NotRequired[Nullable[int]]
1478
+ r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
1479
+ presence_penalty: NotRequired[Nullable[float]]
1480
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
1481
+ response_format: NotRequired[
1482
+ StreamRunAgentFallbackModelConfigurationResponseFormatTypedDict
1483
+ ]
1484
+ r"""An object specifying the format that the model must output"""
1485
+ reasoning_effort: NotRequired[
1486
+ StreamRunAgentFallbackModelConfigurationReasoningEffort
1487
+ ]
1488
+ r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
1489
+
1490
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
1491
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
1492
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1493
+ - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
1494
+
1495
+ Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
1496
+ """
1497
+ verbosity: NotRequired[str]
1498
+ r"""Adjusts response verbosity. Lower levels yield shorter answers."""
1499
+ seed: NotRequired[Nullable[float]]
1500
+ r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
1501
+ stop: NotRequired[Nullable[StreamRunAgentFallbackModelConfigurationStopTypedDict]]
1502
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
1503
+ stream_options: NotRequired[
1504
+ Nullable[StreamRunAgentFallbackModelConfigurationStreamOptionsTypedDict]
1505
+ ]
1506
+ r"""Options for streaming response. Only set this when you set stream: true."""
1507
+ thinking: NotRequired[StreamRunAgentFallbackModelConfigurationThinkingTypedDict]
1508
+ temperature: NotRequired[Nullable[float]]
1509
+ r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
1510
+ top_p: NotRequired[Nullable[float]]
1511
+ r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
1512
+ top_k: NotRequired[Nullable[float]]
1513
+ r"""Limits the model to consider only the top k most likely tokens at each step."""
1514
+ tool_choice: NotRequired[
1515
+ StreamRunAgentFallbackModelConfigurationToolChoiceTypedDict
1516
+ ]
1517
+ r"""Controls which (if any) tool is called by the model."""
1518
+ parallel_tool_calls: NotRequired[bool]
1062
1519
  r"""Whether to enable parallel function calling during tool use."""
1063
1520
  modalities: NotRequired[
1064
1521
  Nullable[List[StreamRunAgentFallbackModelConfigurationModalities]]
@@ -1068,6 +1525,20 @@ class StreamRunAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
1068
1525
  List[StreamRunAgentFallbackModelConfigurationGuardrailsTypedDict]
1069
1526
  ]
1070
1527
  r"""A list of guardrails to apply to the request."""
1528
+ fallbacks: NotRequired[
1529
+ List[StreamRunAgentFallbackModelConfigurationFallbacksTypedDict]
1530
+ ]
1531
+ r"""Array of fallback models to use if primary model fails"""
1532
+ retry: NotRequired[StreamRunAgentFallbackModelConfigurationRetryTypedDict]
1533
+ r"""Retry configuration for the request"""
1534
+ cache: NotRequired[StreamRunAgentFallbackModelConfigurationCacheTypedDict]
1535
+ r"""Cache configuration for the request."""
1536
+ load_balancer: NotRequired[
1537
+ StreamRunAgentFallbackModelConfigurationLoadBalancerTypedDict
1538
+ ]
1539
+ r"""Load balancer configuration for the request."""
1540
+ timeout: NotRequired[StreamRunAgentFallbackModelConfigurationTimeoutTypedDict]
1541
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1071
1542
 
1072
1543
 
1073
1544
  class StreamRunAgentFallbackModelConfigurationParameters(BaseModel):
@@ -1159,77 +1630,96 @@ class StreamRunAgentFallbackModelConfigurationParameters(BaseModel):
1159
1630
  )
1160
1631
  r"""A list of guardrails to apply to the request."""
1161
1632
 
1633
+ fallbacks: Optional[List[StreamRunAgentFallbackModelConfigurationFallbacks]] = None
1634
+ r"""Array of fallback models to use if primary model fails"""
1635
+
1636
+ retry: Optional[StreamRunAgentFallbackModelConfigurationRetry] = None
1637
+ r"""Retry configuration for the request"""
1638
+
1639
+ cache: Optional[StreamRunAgentFallbackModelConfigurationCache] = None
1640
+ r"""Cache configuration for the request."""
1641
+
1642
+ load_balancer: Optional[StreamRunAgentFallbackModelConfigurationLoadBalancer] = None
1643
+ r"""Load balancer configuration for the request."""
1644
+
1645
+ timeout: Optional[StreamRunAgentFallbackModelConfigurationTimeout] = None
1646
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1647
+
1162
1648
  @model_serializer(mode="wrap")
1163
1649
  def serialize_model(self, handler):
1164
- optional_fields = [
1165
- "audio",
1166
- "frequency_penalty",
1167
- "max_tokens",
1168
- "max_completion_tokens",
1169
- "logprobs",
1170
- "top_logprobs",
1171
- "n",
1172
- "presence_penalty",
1173
- "response_format",
1174
- "reasoning_effort",
1175
- "verbosity",
1176
- "seed",
1177
- "stop",
1178
- "stream_options",
1179
- "thinking",
1180
- "temperature",
1181
- "top_p",
1182
- "top_k",
1183
- "tool_choice",
1184
- "parallel_tool_calls",
1185
- "modalities",
1186
- "guardrails",
1187
- ]
1188
- nullable_fields = [
1189
- "audio",
1190
- "frequency_penalty",
1191
- "max_tokens",
1192
- "max_completion_tokens",
1193
- "logprobs",
1194
- "top_logprobs",
1195
- "n",
1196
- "presence_penalty",
1197
- "seed",
1198
- "stop",
1199
- "stream_options",
1200
- "temperature",
1201
- "top_p",
1202
- "top_k",
1203
- "modalities",
1204
- ]
1205
- null_default_fields = []
1206
-
1650
+ optional_fields = set(
1651
+ [
1652
+ "audio",
1653
+ "frequency_penalty",
1654
+ "max_tokens",
1655
+ "max_completion_tokens",
1656
+ "logprobs",
1657
+ "top_logprobs",
1658
+ "n",
1659
+ "presence_penalty",
1660
+ "response_format",
1661
+ "reasoning_effort",
1662
+ "verbosity",
1663
+ "seed",
1664
+ "stop",
1665
+ "stream_options",
1666
+ "thinking",
1667
+ "temperature",
1668
+ "top_p",
1669
+ "top_k",
1670
+ "tool_choice",
1671
+ "parallel_tool_calls",
1672
+ "modalities",
1673
+ "guardrails",
1674
+ "fallbacks",
1675
+ "retry",
1676
+ "cache",
1677
+ "load_balancer",
1678
+ "timeout",
1679
+ ]
1680
+ )
1681
+ nullable_fields = set(
1682
+ [
1683
+ "audio",
1684
+ "frequency_penalty",
1685
+ "max_tokens",
1686
+ "max_completion_tokens",
1687
+ "logprobs",
1688
+ "top_logprobs",
1689
+ "n",
1690
+ "presence_penalty",
1691
+ "seed",
1692
+ "stop",
1693
+ "stream_options",
1694
+ "temperature",
1695
+ "top_p",
1696
+ "top_k",
1697
+ "modalities",
1698
+ ]
1699
+ )
1207
1700
  serialized = handler(self)
1208
-
1209
1701
  m = {}
1210
1702
 
1211
1703
  for n, f in type(self).model_fields.items():
1212
1704
  k = f.alias or n
1213
1705
  val = serialized.get(k)
1214
- serialized.pop(k, None)
1215
-
1216
- optional_nullable = k in optional_fields and k in nullable_fields
1217
- is_set = (
1218
- self.__pydantic_fields_set__.intersection({n})
1219
- or k in null_default_fields
1220
- ) # pylint: disable=no-member
1221
-
1222
- if val is not None and val != UNSET_SENTINEL:
1223
- m[k] = val
1224
- elif val != UNSET_SENTINEL and (
1225
- not k in optional_fields or (optional_nullable and is_set)
1226
- ):
1227
- m[k] = val
1706
+ is_nullable_and_explicitly_set = (
1707
+ k in nullable_fields
1708
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
1709
+ )
1710
+
1711
+ if val != UNSET_SENTINEL:
1712
+ if (
1713
+ val is not None
1714
+ or k not in optional_fields
1715
+ or is_nullable_and_explicitly_set
1716
+ ):
1717
+ m[k] = val
1228
1718
 
1229
1719
  return m
1230
1720
 
1231
1721
 
1232
- class StreamRunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
1722
+ class StreamRunAgentFallbackModelConfigurationAgentsRetryTypedDict(TypedDict):
1233
1723
  r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
1234
1724
 
1235
1725
  count: NotRequired[float]
@@ -1238,7 +1728,7 @@ class StreamRunAgentFallbackModelConfigurationRetryTypedDict(TypedDict):
1238
1728
  r"""HTTP status codes that trigger retry logic"""
1239
1729
 
1240
1730
 
1241
- class StreamRunAgentFallbackModelConfigurationRetry(BaseModel):
1731
+ class StreamRunAgentFallbackModelConfigurationAgentsRetry(BaseModel):
1242
1732
  r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
1243
1733
 
1244
1734
  count: Optional[float] = 3
@@ -1247,6 +1737,22 @@ class StreamRunAgentFallbackModelConfigurationRetry(BaseModel):
1247
1737
  on_codes: Optional[List[float]] = None
1248
1738
  r"""HTTP status codes that trigger retry logic"""
1249
1739
 
1740
+ @model_serializer(mode="wrap")
1741
+ def serialize_model(self, handler):
1742
+ optional_fields = set(["count", "on_codes"])
1743
+ serialized = handler(self)
1744
+ m = {}
1745
+
1746
+ for n, f in type(self).model_fields.items():
1747
+ k = f.alias or n
1748
+ val = serialized.get(k)
1749
+
1750
+ if val != UNSET_SENTINEL:
1751
+ if val is not None or k not in optional_fields:
1752
+ m[k] = val
1753
+
1754
+ return m
1755
+
1250
1756
 
1251
1757
  class StreamRunAgentFallbackModelConfiguration2TypedDict(TypedDict):
1252
1758
  r"""Fallback model configuration with optional parameters and retry settings."""
@@ -1255,7 +1761,7 @@ class StreamRunAgentFallbackModelConfiguration2TypedDict(TypedDict):
1255
1761
  r"""A fallback model ID string. Must support tool calling."""
1256
1762
  parameters: NotRequired[StreamRunAgentFallbackModelConfigurationParametersTypedDict]
1257
1763
  r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
1258
- retry: NotRequired[StreamRunAgentFallbackModelConfigurationRetryTypedDict]
1764
+ retry: NotRequired[StreamRunAgentFallbackModelConfigurationAgentsRetryTypedDict]
1259
1765
  r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
1260
1766
 
1261
1767
 
@@ -1268,9 +1774,25 @@ class StreamRunAgentFallbackModelConfiguration2(BaseModel):
1268
1774
  parameters: Optional[StreamRunAgentFallbackModelConfigurationParameters] = None
1269
1775
  r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
1270
1776
 
1271
- retry: Optional[StreamRunAgentFallbackModelConfigurationRetry] = None
1777
+ retry: Optional[StreamRunAgentFallbackModelConfigurationAgentsRetry] = None
1272
1778
  r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
1273
1779
 
1780
+ @model_serializer(mode="wrap")
1781
+ def serialize_model(self, handler):
1782
+ optional_fields = set(["parameters", "retry"])
1783
+ serialized = handler(self)
1784
+ m = {}
1785
+
1786
+ for n, f in type(self).model_fields.items():
1787
+ k = f.alias or n
1788
+ val = serialized.get(k)
1789
+
1790
+ if val != UNSET_SENTINEL:
1791
+ if val is not None or k not in optional_fields:
1792
+ m[k] = val
1793
+
1794
+ return m
1795
+
1274
1796
 
1275
1797
  StreamRunAgentFallbackModelConfigurationTypedDict = TypeAliasType(
1276
1798
  "StreamRunAgentFallbackModelConfigurationTypedDict",
@@ -1355,6 +1877,22 @@ class StreamRunAgentA2AMessage(BaseModel):
1355
1877
  message_id: Annotated[Optional[str], pydantic.Field(alias="messageId")] = None
1356
1878
  r"""Optional A2A message ID in ULID format"""
1357
1879
 
1880
+ @model_serializer(mode="wrap")
1881
+ def serialize_model(self, handler):
1882
+ optional_fields = set(["messageId"])
1883
+ serialized = handler(self)
1884
+ m = {}
1885
+
1886
+ for n, f in type(self).model_fields.items():
1887
+ k = f.alias or n
1888
+ val = serialized.get(k)
1889
+
1890
+ if val != UNSET_SENTINEL:
1891
+ if val is not None or k not in optional_fields:
1892
+ m[k] = val
1893
+
1894
+ return m
1895
+
1358
1896
 
1359
1897
  class StreamRunAgentIdentityTypedDict(TypedDict):
1360
1898
  r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
@@ -1394,6 +1932,22 @@ class StreamRunAgentIdentity(BaseModel):
1394
1932
  tags: Optional[List[str]] = None
1395
1933
  r"""A list of tags associated with the contact"""
1396
1934
 
1935
+ @model_serializer(mode="wrap")
1936
+ def serialize_model(self, handler):
1937
+ optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
1938
+ serialized = handler(self)
1939
+ m = {}
1940
+
1941
+ for n, f in type(self).model_fields.items():
1942
+ k = f.alias or n
1943
+ val = serialized.get(k)
1944
+
1945
+ if val != UNSET_SENTINEL:
1946
+ if val is not None or k not in optional_fields:
1947
+ m[k] = val
1948
+
1949
+ return m
1950
+
1397
1951
 
1398
1952
  @deprecated(
1399
1953
  "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
@@ -1439,6 +1993,22 @@ class StreamRunAgentContact(BaseModel):
1439
1993
  tags: Optional[List[str]] = None
1440
1994
  r"""A list of tags associated with the contact"""
1441
1995
 
1996
+ @model_serializer(mode="wrap")
1997
+ def serialize_model(self, handler):
1998
+ optional_fields = set(["display_name", "email", "metadata", "logo_url", "tags"])
1999
+ serialized = handler(self)
2000
+ m = {}
2001
+
2002
+ for n, f in type(self).model_fields.items():
2003
+ k = f.alias or n
2004
+ val = serialized.get(k)
2005
+
2006
+ if val != UNSET_SENTINEL:
2007
+ if val is not None or k not in optional_fields:
2008
+ m[k] = val
2009
+
2010
+ return m
2011
+
1442
2012
 
1443
2013
  class StreamRunAgentThreadTypedDict(TypedDict):
1444
2014
  r"""Thread information to group related requests"""
@@ -1458,6 +2028,22 @@ class StreamRunAgentThread(BaseModel):
     tags: Optional[List[str]] = None
     r"""Optional tags to differentiate or categorize threads"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["tags"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentMemoryTypedDict(TypedDict):
     r"""Memory configuration for the agent execution. Used to associate memory stores with specific entities like users or sessions."""
@@ -1497,8 +2083,24 @@ class StreamRunAgentTeamOfAgents(BaseModel):
     role: Optional[str] = None
     r"""The role of the agent in this context. This is used to give extra information to the leader to help it decide which agent to hand off to."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["role"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

-StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type = Literal[
+StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type = Literal[
     "mcp",
 ]

@@ -1513,29 +2115,61 @@ class StreamRunAgentAgentToolInputRunAgentsHeaders(BaseModel):

     encrypted: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["encrypted"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

-StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType = Literal[
+StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType = Literal[
     "object",
 ]


-class AgentToolInputRunSchemaTypedDict(TypedDict):
-    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType
+class StreamRunAgentAgentToolInputRunAgentsSchemaTypedDict(TypedDict):
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType
     properties: NotRequired[Dict[str, Any]]
     required: NotRequired[List[str]]


-class AgentToolInputRunSchema(BaseModel):
-    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15McpType
+class StreamRunAgentAgentToolInputRunAgentsSchema(BaseModel):
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16McpType

     properties: Optional[Dict[str, Any]] = None

     required: Optional[List[str]] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["properties", "required"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunToolsTypedDict(TypedDict):
     name: str
-    schema_: AgentToolInputRunSchemaTypedDict
+    schema_: StreamRunAgentAgentToolInputRunAgentsSchemaTypedDict
     id: NotRequired[str]
     description: NotRequired[str]

@@ -1543,12 +2177,30 @@ class AgentToolInputRunToolsTypedDict(TypedDict):
 class AgentToolInputRunTools(BaseModel):
     name: str

-    schema_: Annotated[AgentToolInputRunSchema, pydantic.Field(alias="schema")]
+    schema_: Annotated[
+        StreamRunAgentAgentToolInputRunAgentsSchema, pydantic.Field(alias="schema")
+    ]

-    id: Optional[str] = "01KEXRJ7NDSYG7V3YEZKK55TE9"
+    id: Optional[str] = "01KFTTTR0XG9Q2WCVPG1AD9NB7"

     description: Optional[str] = None

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "description"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 AgentToolInputRunConnectionType = Literal[
     "http",
@@ -1583,11 +2235,27 @@ class AgentToolInputRunMcp(BaseModel):
     headers: Optional[Dict[str, StreamRunAgentAgentToolInputRunAgentsHeaders]] = None
     r"""HTTP headers for MCP server requests with encryption support"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["headers"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunMCPToolRunTypedDict(TypedDict):
     r"""MCP tool with inline definition for on-the-fly creation in run endpoint"""

-    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type
     key: str
     r"""Unique key of the tool as it will be displayed in the UI"""
     description: str
@@ -1601,7 +2269,7 @@ class AgentToolInputRunMCPToolRun(BaseModel):
 class AgentToolInputRunMCPToolRun(BaseModel):
     r"""MCP tool with inline definition for on-the-fly creation in run endpoint"""

-    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools16Type

     key: str
     r"""Unique key of the tool as it will be displayed in the UI"""
@@ -1617,6 +2285,157 @@ class AgentToolInputRunMCPToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type = Literal[
+    "json_schema",
+]
+
+
+class StreamRunAgentAgentToolInputRunSchemaTypedDict(TypedDict):
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    type: str
+    r"""The JSON Schema type"""
+    properties: Dict[str, Any]
+    r"""The properties of the JSON Schema object"""
+    required: List[str]
+    r"""Array of required property names"""
+
+
+class StreamRunAgentAgentToolInputRunSchema(BaseModel):
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    model_config = ConfigDict(
+        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
+    )
+    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
+
+    type: str
+    r"""The JSON Schema type"""
+
+    properties: Dict[str, Any]
+    r"""The properties of the JSON Schema object"""
+
+    required: List[str]
+    r"""Array of required property names"""
+
+    @property
+    def additional_properties(self):
+        return self.__pydantic_extra__
+
+    @additional_properties.setter
+    def additional_properties(self, value):
+        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class StreamRunAgentAgentToolInputRunJSONSchemaTypedDict(TypedDict):
+    name: str
+    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
+    description: str
+    r"""A description of what the response format is for. This will be shown to the user."""
+    schema_: StreamRunAgentAgentToolInputRunSchemaTypedDict
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+    strict: NotRequired[bool]
+    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. Only compatible with `OpenAI` models."""
+
+
+class StreamRunAgentAgentToolInputRunJSONSchema(BaseModel):
+    name: str
+    r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
+
+    description: str
+    r"""A description of what the response format is for. This will be shown to the user."""
+
+    schema_: Annotated[
+        StreamRunAgentAgentToolInputRunSchema, pydantic.Field(alias="schema")
+    ]
+    r"""The schema for the response format, described as a JSON Schema object. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    strict: Optional[bool] = None
+    r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. Only compatible with `OpenAI` models."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class AgentToolInputRunJSONSchemaToolRunTypedDict(TypedDict):
+    r"""JSON Schema tool with inline definition for on-the-fly creation in run endpoint"""
+
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+    key: str
+    r"""Unique key of the tool as it will be displayed in the UI"""
+    description: str
+    r"""A description of the tool, used by the model to choose when and how to call the tool. We do recommend using the `description` field as accurate as possible to give enough context to the model to make the right decision."""
+    json_schema: StreamRunAgentAgentToolInputRunJSONSchemaTypedDict
+    id: NotRequired[str]
+    display_name: NotRequired[str]
+    requires_approval: NotRequired[bool]
+
+
+class AgentToolInputRunJSONSchemaToolRun(BaseModel):
+    r"""JSON Schema tool with inline definition for on-the-fly creation in run endpoint"""
+
+    type: StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools15Type
+
+    key: str
+    r"""Unique key of the tool as it will be displayed in the UI"""
+
+    description: str
+    r"""A description of the tool, used by the model to choose when and how to call the tool. We do recommend using the `description` field as accurate as possible to give enough context to the model to make the right decision."""
+
+    json_schema: StreamRunAgentAgentToolInputRunJSONSchema
+
+    id: Annotated[Optional[str], pydantic.Field(alias="_id")] = None
+
+    display_name: Optional[str] = None
+
+    requires_approval: Optional[bool] = False
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools14Type = Literal[
     "function",
@@ -1690,6 +2509,22 @@ class StreamRunAgentAgentToolInputRunFunction(BaseModel):
     parameters: Optional[StreamRunAgentAgentToolInputRunAgentsParameters] = None
     r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict", "parameters"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunFunctionToolRunTypedDict(TypedDict):
     r"""Function tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1722,6 +2557,24 @@ class AgentToolInputRunFunctionToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            ["_id", "display_name", "description", "requires_approval"]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools13Type = Literal[
     "code",
@@ -1788,8 +2641,24 @@ class AgentToolInputRunCodeTool(BaseModel):
     code: str
     r"""The code to execute."""

-    parameters: Optional[StreamRunAgentAgentToolInputRunParameters] = None
-    r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+    parameters: Optional[StreamRunAgentAgentToolInputRunParameters] = None
+    r"""The parameters the functions accepts, described as a JSON Schema object. See the `OpenAI` [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m


 class AgentToolInputRunCodeToolRunTypedDict(TypedDict):
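The code tool defined above pairs the source to execute (`code`) with an optional JSON-Schema `parameters` description. For orientation, a hedged sketch of the inner `AgentToolInputRunCodeTool` piece only, since the diff does not show the outer `*ToolRun` wrapper's full field list (values invented):

```python
# Illustrative inner payload for AgentToolInputRunCodeTool; outer wrapper
# fields (key, description, requires_approval, ...) are omitted here.
code_tool = {
    "code": "def run(text):\n    return len(text.split())",
    "parameters": {              # optional; dropped by the serializer when unset
        "type": "object",
        "properties": {"text": {"type": "string"}},
        "required": ["text"],
    },
}
```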
@@ -1825,6 +2694,22 @@ class AgentToolInputRunCodeToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools12Type = Literal[
     "http",
@@ -1850,6 +2735,22 @@ class StreamRunAgentHeaders2(BaseModel):

     encrypted: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["encrypted"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunHeadersTypedDict = TypeAliasType(
     "StreamRunAgentAgentToolInputRunHeadersTypedDict",
@@ -1890,6 +2791,22 @@ class AgentToolInputRunBlueprint(BaseModel):
     body: Optional[Dict[str, Any]] = None
     r"""The body to send with the request."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["headers", "body"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools12HTTPType = (
     Literal[
@@ -1937,6 +2854,22 @@ class AgentToolInputRunArguments(BaseModel):
     default_value: Optional[AgentToolInputRunDefaultValue] = None
     r"""The default value of the argument."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["send_to_model", "default_value"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunHTTPTypedDict(TypedDict):
     blueprint: AgentToolInputRunBlueprintTypedDict
@@ -1952,6 +2885,22 @@ class AgentToolInputRunHTTP(BaseModel):
     arguments: Optional[Dict[str, AgentToolInputRunArguments]] = None
     r"""The arguments to send with the request. The keys will be used to replace the placeholders in the `blueprint` field."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["arguments"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class AgentToolInputRunHTTPToolRunTypedDict(TypedDict):
     r"""HTTP tool with inline definition for on-the-fly creation in run endpoint"""
@@ -1986,6 +2935,22 @@ class AgentToolInputRunHTTPToolRun(BaseModel):

     requires_approval: Optional[bool] = False

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["_id", "display_name", "requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools11Type = Literal[
     "current_date",
@@ -2008,6 +2973,22 @@ class StreamRunAgentAgentToolInputRunCurrentDateTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools10Type = Literal[
     "query_knowledge_base",
@@ -2030,6 +3011,22 @@ class StreamRunAgentAgentToolInputRunQueryKnowledgeBaseTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools9Type = Literal[
     "retrieve_knowledge_bases",
@@ -2052,6 +3049,22 @@ class StreamRunAgentAgentToolInputRunRetrieveKnowledgeBasesTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools8Type = Literal[
     "delete_memory_document",
@@ -2074,6 +3087,22 @@ class StreamRunAgentAgentToolInputRunDeleteMemoryDocumentTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsTools7Type = Literal[
     "retrieve_memory_stores",
@@ -2096,6 +3125,22 @@ class StreamRunAgentAgentToolInputRunRetrieveMemoryStoresTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsToolsType = Literal[
     "write_memory_store",
@@ -2118,6 +3163,22 @@ class StreamRunAgentAgentToolInputRunWriteMemoryStoreTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodySettingsType = Literal[
     "query_memory_store",
@@ -2140,6 +3201,22 @@ class StreamRunAgentAgentToolInputRunQueryMemoryStoreTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestRequestBodyType = Literal[
     "retrieve_agents",
@@ -2162,6 +3239,22 @@ class StreamRunAgentAgentToolInputRunRetrieveAgentsTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsRequestType = Literal["call_sub_agent",]

@@ -2182,6 +3275,22 @@ class StreamRunAgentAgentToolInputRunCallSubAgentTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunAgentsType = Literal["web_scraper",]

@@ -2202,6 +3311,22 @@ class StreamRunAgentAgentToolInputRunWebScraperTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunType = Literal["google_search",]

@@ -2222,6 +3347,22 @@ class StreamRunAgentAgentToolInputRunGoogleSearchTool(BaseModel):
     requires_approval: Optional[bool] = None
     r"""Whether this tool requires approval before execution"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["requires_approval"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentToolInputRunTypedDict = TypeAliasType(
     "StreamRunAgentAgentToolInputRunTypedDict",
@@ -2240,10 +3381,11 @@ StreamRunAgentAgentToolInputRunTypedDict = TypeAliasType(
         AgentToolInputRunHTTPToolRunTypedDict,
         AgentToolInputRunCodeToolRunTypedDict,
         AgentToolInputRunFunctionToolRunTypedDict,
+        AgentToolInputRunJSONSchemaToolRunTypedDict,
         AgentToolInputRunMCPToolRunTypedDict,
     ],
 )
-r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, MCP) support full inline definitions for on-the-fly creation."""
+r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, JSON Schema, MCP) support full inline definitions for on-the-fly creation."""


 StreamRunAgentAgentToolInputRun = Annotated[
@@ -2286,11 +3428,12 @@ StreamRunAgentAgentToolInputRun = Annotated[
         Annotated[AgentToolInputRunHTTPToolRun, Tag("http")],
         Annotated[AgentToolInputRunCodeToolRun, Tag("code")],
         Annotated[AgentToolInputRunFunctionToolRun, Tag("function")],
+        Annotated[AgentToolInputRunJSONSchemaToolRun, Tag("json_schema")],
         Annotated[AgentToolInputRunMCPToolRun, Tag("mcp")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
-r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, MCP) support full inline definitions for on-the-fly creation."""
+r"""Tool configuration for agent run operations. Built-in tools only require a type and requires_approval, while custom tools (HTTP, Code, Function, JSON Schema, MCP) support full inline definitions for on-the-fly creation."""


 StreamRunAgentToolApprovalRequired = Literal[
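The union above routes each incoming tool dict to the right model through a callable `Discriminator` that reads the `type` key and matches it against the `Tag` on each branch; that is how the new `json_schema` variant becomes selectable. A self-contained sketch of the same mechanism with toy models (the SDK's `get_discriminator` helper is approximated here by a local `pick` function):

```python
from typing import Annotated, Union

from pydantic import BaseModel, Discriminator, Tag, TypeAdapter


class GoogleSearchTool(BaseModel):
    type: str


class JSONSchemaTool(BaseModel):
    type: str
    key: str


def pick(value):
    # Mirrors the shape of get_discriminator(m, "type", "type"): read the
    # tag from a dict key or an attribute, depending on the input form.
    return value.get("type") if isinstance(value, dict) else getattr(value, "type", None)


Tool = Annotated[
    Union[
        Annotated[GoogleSearchTool, Tag("google_search")],
        Annotated[JSONSchemaTool, Tag("json_schema")],
    ],
    Discriminator(pick),
]

adapter = TypeAdapter(Tool)
tool = adapter.validate_python({"type": "json_schema", "key": "extract_invoice"})
print(type(tool).__name__)  # JSONSchemaTool
```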
@@ -2327,6 +3470,22 @@ class StreamRunAgentEvaluators(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 StreamRunAgentAgentsExecuteOn = Literal[
     "input",
@@ -2354,6 +3513,22 @@ class StreamRunAgentGuardrails(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentSettingsTypedDict(TypedDict):
     tools: NotRequired[List[StreamRunAgentAgentToolInputRunTypedDict]]
@@ -2389,6 +3564,31 @@ class StreamRunAgentSettings(BaseModel):
     guardrails: Optional[List[StreamRunAgentGuardrails]] = None
     r"""Configuration for a guardrail applied to the agent"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "tools",
+                "tool_approval_required",
+                "max_iterations",
+                "max_execution_time",
+                "evaluators",
+                "guardrails",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentRequestBodyTypedDict(TypedDict):
     key: str
@@ -2514,6 +3714,39 @@ class StreamRunAgentRequestBody(BaseModel):
     stream_timeout_seconds: Optional[float] = None
     r"""Stream timeout in seconds (1-3600). Default: 1800 (30 minutes)"""

+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "task_id",
+                "fallback_models",
+                "variables",
+                "identity",
+                "contact",
+                "thread",
+                "memory",
+                "description",
+                "system_prompt",
+                "memory_stores",
+                "knowledge_bases",
+                "team_of_agents",
+                "metadata",
+                "stream_timeout_seconds",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+

 class StreamRunAgentAgentsResponseBodyData(BaseModel):
     message: str