orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
@@ -51,6 +51,22 @@ class ListAgentsRequest(BaseModel):
     ] = None
     r"""A cursor for use in pagination. `ending_before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 20 objects, starting with `01JJ1HDHN79XAS7A01WB3HYSDB`, your subsequent call can include `before=01JJ1HDHN79XAS7A01WB3HYSDB` in order to fetch the previous page of the list."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["limit", "starting_after", "ending_before"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsObject = Literal["list",]
 
@@ -133,6 +149,32 @@ class ListAgentsTools(BaseModel):
     timeout: Optional[float] = 120
     r"""Tool execution timeout in seconds (default: 2 minutes, max: 10 minutes)"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "key",
+                "display_name",
+                "description",
+                "requires_approval",
+                "tool_id",
+                "conditions",
+                "timeout",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsExecuteOn = Literal[
     "input",
@@ -160,6 +202,22 @@ class ListAgentsEvaluators(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsAgentsExecuteOn = Literal[
     "input",
@@ -187,6 +245,22 @@ class ListAgentsGuardrails(BaseModel):
     sample_rate: Optional[float] = 50
     r"""The percentage of executions to evaluate with this evaluator (1-100). For example, a value of 50 means the evaluator will run on approximately half of the executions."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["sample_rate"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListAgentsSettingsTypedDict(TypedDict):
     max_iterations: NotRequired[int]
@@ -220,6 +294,31 @@ class ListAgentsSettings(BaseModel):
     guardrails: Optional[List[ListAgentsGuardrails]] = None
     r"""Configuration for a guardrail applied to the agent"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "max_iterations",
+                "max_execution_time",
+                "tool_approval_required",
+                "tools",
+                "evaluators",
+                "guardrails",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsVoice = Literal[
     "alloy",
@@ -288,6 +387,22 @@ class ListAgentsResponseFormatJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListAgentsResponseFormatAgentsJSONSchemaTypedDict(TypedDict):
     r"""
@@ -416,6 +531,22 @@ class ListAgentsStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsThinkingTypedDict = TypeAliasType(
     "ListAgentsThinkingTypedDict",
@@ -458,6 +589,22 @@ class ListAgentsToolChoice2(BaseModel):
     type: Optional[ListAgentsToolChoiceType] = None
     r"""The type of the tool. Currently, only function is supported."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsToolChoice1 = Literal[
     "none",
@@ -521,6 +668,154 @@ class ListAgentsAgentsGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
+class ListAgentsFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class ListAgentsFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class ListAgentsAgentsRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class ListAgentsAgentsRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ListAgentsType = Literal["exact_match",]
+
+
+class ListAgentsCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: ListAgentsType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class ListAgentsCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: ListAgentsType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ListAgentsLoadBalancerType = Literal["weight_based",]
+
+
+class ListAgentsLoadBalancerModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class ListAgentsLoadBalancerModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class ListAgentsLoadBalancer1TypedDict(TypedDict):
+    type: ListAgentsLoadBalancerType
+    models: List[ListAgentsLoadBalancerModelsTypedDict]
+
+
+class ListAgentsLoadBalancer1(BaseModel):
+    type: ListAgentsLoadBalancerType
+
+    models: List[ListAgentsLoadBalancerModels]
+
+
+ListAgentsLoadBalancerTypedDict = ListAgentsLoadBalancer1TypedDict
+r"""Load balancer configuration for the request."""
+
+
+ListAgentsLoadBalancer = ListAgentsLoadBalancer1
+r"""Load balancer configuration for the request."""
+
+
+class ListAgentsTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class ListAgentsTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 class ListAgentsParametersTypedDict(TypedDict):
     r"""Model behavior parameters (snake_case) stored as part of the agent configuration. These become the default parameters used when the agent is executed. Commonly used: temperature (0-1, controls randomness), max_completion_tokens (response length), top_p (nucleus sampling). Advanced: frequency_penalty, presence_penalty, response_format (JSON/structured output), reasoning_effort (for o1/thinking models), seed (reproducibility), stop sequences. Model-specific support varies. Runtime parameters in agent execution requests can override these defaults."""
 
@@ -578,6 +873,16 @@ class ListAgentsParametersTypedDict(TypedDict):
     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
     guardrails: NotRequired[List[ListAgentsAgentsGuardrailsTypedDict]]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[ListAgentsFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[ListAgentsAgentsRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[ListAgentsCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[ListAgentsLoadBalancerTypedDict]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[ListAgentsTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
 class ListAgentsParameters(BaseModel):
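Editor's note: the hunks above introduce per-request routing controls (fallbacks, retry, cache, load_balancer, timeout) and wire them into ListAgentsParametersTypedDict. A purely illustrative value matching those TypedDict shapes is sketched below; only the keys and the literals "exact_match" and "weight_based" come from the definitions above, while the model identifiers and numeric values are invented for the example.

# Illustrative only: plausible values for the new routing-control keys of
# ListAgentsParametersTypedDict. Field names and literals are taken from the
# hunks above; the model IDs and numbers here are made up.
routing_parameters = {
    "fallbacks": [{"model": "example/fallback-model"}],
    "retry": {"count": 3, "on_codes": [429, 500, 503]},
    "cache": {"type": "exact_match", "ttl": 1800},  # ttl in seconds, max 259200
    "load_balancer": {
        "type": "weight_based",
        "models": [
            {"model": "example/model-a", "weight": 0.7},
            {"model": "example/model-b", "weight": 0.3},
        ],
    },
    "timeout": {"call_timeout": 30000},  # milliseconds
}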
@@ -659,72 +964,91 @@ class ListAgentsParameters(BaseModel):
     guardrails: Optional[List[ListAgentsAgentsGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[ListAgentsFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[ListAgentsAgentsRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[ListAgentsCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[ListAgentsLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[ListAgentsTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "audio",
-            "frequency_penalty",
-            "max_tokens",
-            "max_completion_tokens",
-            "logprobs",
-            "top_logprobs",
-            "n",
-            "presence_penalty",
-            "response_format",
-            "reasoning_effort",
-            "verbosity",
-            "seed",
-            "stop",
-            "stream_options",
-            "thinking",
-            "temperature",
-            "top_p",
-            "top_k",
-            "tool_choice",
-            "parallel_tool_calls",
-            "modalities",
-            "guardrails",
-        ]
-        nullable_fields = [
-            "audio",
-            "frequency_penalty",
-            "max_tokens",
-            "max_completion_tokens",
-            "logprobs",
-            "top_logprobs",
-            "n",
-            "presence_penalty",
-            "seed",
-            "stop",
-            "stream_options",
-            "temperature",
-            "top_p",
-            "top_k",
-            "modalities",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -747,6 +1071,22 @@ class ListAgentsRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsFallbackModelConfigurationVoice = Literal[
     "alloy",
@@ -819,6 +1159,22 @@ class ListAgentsResponseFormatAgentsResponseJSONSchema(BaseModel):
     strict: Optional[bool] = False
     r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "schema", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListAgentsResponseFormatAgentsResponse200JSONSchemaTypedDict(TypedDict):
     r"""
@@ -951,6 +1307,22 @@ class ListAgentsFallbackModelConfigurationStreamOptions(BaseModel):
     include_usage: Optional[bool] = None
     r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["include_usage"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsFallbackModelConfigurationThinkingTypedDict = TypeAliasType(
     "ListAgentsFallbackModelConfigurationThinkingTypedDict",
@@ -993,6 +1365,22 @@ class ListAgentsToolChoiceAgents2(BaseModel):
     type: Optional[ListAgentsToolChoiceAgentsType] = None
     r"""The type of the tool. Currently, only function is supported."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsToolChoiceAgents1 = Literal[
     "none",
@@ -1059,6 +1447,156 @@ class ListAgentsFallbackModelConfigurationGuardrails(BaseModel):
     r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
 
 
+class ListAgentsFallbackModelConfigurationFallbacksTypedDict(TypedDict):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class ListAgentsFallbackModelConfigurationFallbacks(BaseModel):
+    model: str
+    r"""Fallback model identifier"""
+
+
+class ListAgentsFallbackModelConfigurationAgentsRetryTypedDict(TypedDict):
+    r"""Retry configuration for the request"""
+
+    count: NotRequired[float]
+    r"""Number of retry attempts (1-5)"""
+    on_codes: NotRequired[List[float]]
+    r"""HTTP status codes that trigger retry logic"""
+
+
+class ListAgentsFallbackModelConfigurationAgentsRetry(BaseModel):
+    r"""Retry configuration for the request"""
+
+    count: Optional[float] = 3
+    r"""Number of retry attempts (1-5)"""
+
+    on_codes: Optional[List[float]] = None
+    r"""HTTP status codes that trigger retry logic"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ListAgentsFallbackModelConfigurationType = Literal["exact_match",]
+
+
+class ListAgentsFallbackModelConfigurationCacheTypedDict(TypedDict):
+    r"""Cache configuration for the request."""
+
+    type: ListAgentsFallbackModelConfigurationType
+    ttl: NotRequired[float]
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+
+class ListAgentsFallbackModelConfigurationCache(BaseModel):
+    r"""Cache configuration for the request."""
+
+    type: ListAgentsFallbackModelConfigurationType
+
+    ttl: Optional[float] = 1800
+    r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["ttl"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ListAgentsLoadBalancerAgentsType = Literal["weight_based",]
+
+
+class ListAgentsLoadBalancerAgentsModelsTypedDict(TypedDict):
+    model: str
+    r"""Model identifier for load balancing"""
+    weight: NotRequired[float]
+    r"""Weight assigned to this model for load balancing"""
+
+
+class ListAgentsLoadBalancerAgentsModels(BaseModel):
+    model: str
+    r"""Model identifier for load balancing"""
+
+    weight: Optional[float] = 0.5
+    r"""Weight assigned to this model for load balancing"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["weight"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+class ListAgentsLoadBalancerAgents1TypedDict(TypedDict):
+    type: ListAgentsLoadBalancerAgentsType
+    models: List[ListAgentsLoadBalancerAgentsModelsTypedDict]
+
+
+class ListAgentsLoadBalancerAgents1(BaseModel):
+    type: ListAgentsLoadBalancerAgentsType
+
+    models: List[ListAgentsLoadBalancerAgentsModels]
+
+
+ListAgentsFallbackModelConfigurationLoadBalancerTypedDict = (
+    ListAgentsLoadBalancerAgents1TypedDict
+)
+r"""Load balancer configuration for the request."""
+
+
+ListAgentsFallbackModelConfigurationLoadBalancer = ListAgentsLoadBalancerAgents1
+r"""Load balancer configuration for the request."""
+
+
+class ListAgentsFallbackModelConfigurationTimeoutTypedDict(TypedDict):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
+class ListAgentsFallbackModelConfigurationTimeout(BaseModel):
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
+    call_timeout: float
+    r"""Timeout value in milliseconds"""
+
+
 class ListAgentsFallbackModelConfigurationParametersTypedDict(TypedDict):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""
 
@@ -1124,6 +1662,18 @@ class ListAgentsFallbackModelConfigurationParametersTypedDict(TypedDict):
         List[ListAgentsFallbackModelConfigurationGuardrailsTypedDict]
     ]
     r"""A list of guardrails to apply to the request."""
+    fallbacks: NotRequired[List[ListAgentsFallbackModelConfigurationFallbacksTypedDict]]
+    r"""Array of fallback models to use if primary model fails"""
+    retry: NotRequired[ListAgentsFallbackModelConfigurationAgentsRetryTypedDict]
+    r"""Retry configuration for the request"""
+    cache: NotRequired[ListAgentsFallbackModelConfigurationCacheTypedDict]
+    r"""Cache configuration for the request."""
+    load_balancer: NotRequired[
+        ListAgentsFallbackModelConfigurationLoadBalancerTypedDict
+    ]
+    r"""Load balancer configuration for the request."""
+    timeout: NotRequired[ListAgentsFallbackModelConfigurationTimeoutTypedDict]
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
 
 
 class ListAgentsFallbackModelConfigurationParameters(BaseModel):
@@ -1211,72 +1761,91 @@ class ListAgentsFallbackModelConfigurationParameters(BaseModel):
     guardrails: Optional[List[ListAgentsFallbackModelConfigurationGuardrails]] = None
     r"""A list of guardrails to apply to the request."""
 
+    fallbacks: Optional[List[ListAgentsFallbackModelConfigurationFallbacks]] = None
+    r"""Array of fallback models to use if primary model fails"""
+
+    retry: Optional[ListAgentsFallbackModelConfigurationAgentsRetry] = None
+    r"""Retry configuration for the request"""
+
+    cache: Optional[ListAgentsFallbackModelConfigurationCache] = None
+    r"""Cache configuration for the request."""
+
+    load_balancer: Optional[ListAgentsFallbackModelConfigurationLoadBalancer] = None
+    r"""Load balancer configuration for the request."""
+
+    timeout: Optional[ListAgentsFallbackModelConfigurationTimeout] = None
+    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "audio",
-            "frequency_penalty",
-            "max_tokens",
-            "max_completion_tokens",
-            "logprobs",
-            "top_logprobs",
-            "n",
-            "presence_penalty",
-            "response_format",
-            "reasoning_effort",
-            "verbosity",
-            "seed",
-            "stop",
-            "stream_options",
-            "thinking",
-            "temperature",
-            "top_p",
-            "top_k",
-            "tool_choice",
-            "parallel_tool_calls",
-            "modalities",
-            "guardrails",
-        ]
-        nullable_fields = [
-            "audio",
-            "frequency_penalty",
-            "max_tokens",
-            "max_completion_tokens",
-            "logprobs",
-            "top_logprobs",
-            "n",
-            "presence_penalty",
-            "seed",
-            "stop",
-            "stream_options",
-            "temperature",
-            "top_p",
-            "top_k",
-            "modalities",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "response_format",
+                "reasoning_effort",
+                "verbosity",
+                "seed",
+                "stop",
+                "stream_options",
+                "thinking",
+                "temperature",
+                "top_p",
+                "top_k",
+                "tool_choice",
+                "parallel_tool_calls",
+                "modalities",
+                "guardrails",
+                "fallbacks",
+                "retry",
+                "cache",
+                "load_balancer",
+                "timeout",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "audio",
+                "frequency_penalty",
+                "max_tokens",
+                "max_completion_tokens",
+                "logprobs",
+                "top_logprobs",
+                "n",
+                "presence_penalty",
+                "seed",
+                "stop",
+                "stream_options",
+                "temperature",
+                "top_p",
+                "top_k",
+                "modalities",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -1299,6 +1868,22 @@ class ListAgentsFallbackModelConfigurationRetry(BaseModel):
     on_codes: Optional[List[float]] = None
     r"""HTTP status codes that trigger retry logic"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["count", "on_codes"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListAgentsFallbackModelConfiguration2TypedDict(TypedDict):
     r"""Fallback model configuration with optional parameters and retry settings."""
@@ -1323,6 +1908,22 @@ class ListAgentsFallbackModelConfiguration2(BaseModel):
     retry: Optional[ListAgentsFallbackModelConfigurationRetry] = None
     r"""Retry configuration for this fallback model. Allows customizing retry count (1-5) and HTTP status codes that trigger retries."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["parameters", "retry"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 ListAgentsFallbackModelConfigurationTypedDict = TypeAliasType(
     "ListAgentsFallbackModelConfigurationTypedDict",
@@ -1373,31 +1974,28 @@ class ListAgentsModel(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["integration_id", "parameters", "retry", "fallback_models"]
-        nullable_fields = ["integration_id", "fallback_models"]
-        null_default_fields = []
-
+        optional_fields = set(
+            ["integration_id", "parameters", "retry", "fallback_models"]
+        )
+        nullable_fields = set(["integration_id", "fallback_models"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -1416,6 +2014,22 @@ class ListAgentsTeamOfAgents(BaseModel):
     role: Optional[str] = None
    r"""The role of the agent in this context. This is used to give extra information to the leader to help it decide which agent to hand off to."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["role"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListAgentsMetricsTypedDict(TypedDict):
     total_cost: NotRequired[float]
@@ -1424,6 +2038,22 @@ class ListAgentsMetricsTypedDict(TypedDict):
 class ListAgentsMetrics(BaseModel):
     total_cost: Optional[float] = 0
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["total_cost"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class ListAgentsKnowledgeBasesTypedDict(TypedDict):
     knowledge_id: str
@@ -1538,43 +2168,40 @@ class ListAgentsData(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "created_by_id",
-            "updated_by_id",
-            "created",
-            "updated",
-            "system_prompt",
-            "settings",
-            "version_hash",
-            "metrics",
-            "variables",
-            "knowledge_bases",
-            "source",
-        ]
-        nullable_fields = ["created_by_id", "updated_by_id"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "created_by_id",
+                "updated_by_id",
+                "created",
+                "updated",
+                "system_prompt",
+                "settings",
+                "version_hash",
+                "metrics",
+                "variables",
+                "knowledge_bases",
+                "source",
+            ]
+        )
+        nullable_fields = set(["created_by_id", "updated_by_id"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
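Editor's note: the regenerated serialize_model hooks throughout this file all follow one pattern: optional field names are collected in a set, values equal to the unset sentinel or None-valued optionals are dropped from the serialized output, and a nullable field is kept even when None as long as the caller explicitly set it. A minimal, self-contained sketch of that behavior, assuming a hypothetical Widget model and a string stand-in for the SDK's UNSET_SENTINEL (the real sentinel lives in the SDK's own utilities), is below.

# Minimal sketch of the serializer pattern shown in the diff above.
# `Widget`, its fields, and this UNSET_SENTINEL stand-in are illustrative only.
from typing import Optional

from pydantic import BaseModel, model_serializer

UNSET_SENTINEL = "~?~unset~?~"  # placeholder for the SDK's "never serialize" marker


class Widget(BaseModel):
    name: str
    limit: Optional[int] = None           # optional: dropped when unset or None
    integration_id: Optional[str] = None  # optional and nullable: kept if explicitly set to None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = {"limit", "integration_id"}
        nullable_fields = {"integration_id"}
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            # A nullable field counts as "explicitly set" only if the caller assigned it.
            is_nullable_and_explicitly_set = (
                k in nullable_fields and n in self.__pydantic_fields_set__
            )

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields or is_nullable_and_explicitly_set:
                    m[k] = val

        return m


print(Widget(name="a").model_dump())                       # {'name': 'a'} - unset optionals dropped
print(Widget(name="a", integration_id=None).model_dump())  # {'name': 'a', 'integration_id': None} - explicit null kept

For the models in this file the new formulation appears logically equivalent to the old optional_nullable/is_set branching (null_default_fields was always empty); the rewrite mainly states the rule more directly and switches the field-name lookups to sets.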