orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
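The bulk of the churn in the model files below follows one recurring pattern: each generated pydantic model replaces its old `serialize_model` hook (list-based `optional_fields` plus `null_default_fields` bookkeeping) with a simpler set-based version that drops optional fields left at their defaults and keeps nullable fields only when they were explicitly assigned. A minimal, self-contained sketch of the new behavior, using a hypothetical model and plain pydantic (the SDK's `UNSET`/`UNSET_SENTINEL` handling for nullable fields is omitted here):

from typing import List, Optional

from pydantic import BaseModel, model_serializer


class RetrySketch(BaseModel):
    # Hypothetical stand-in for models like GetAllPromptsRetry in the diff.
    count: Optional[float] = 3
    on_codes: Optional[List[float]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = set(["count", "on_codes"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            # Keep a value unless it is an optional field still at None.
            if val is not None or k not in optional_fields:
                m[k] = val

        return m


# on_codes was never set, so it is omitted; count keeps its default.
assert RetrySketch().model_dump() == {"count": 3.0}

The full generated version, visible throughout the diff, additionally compares each value against `UNSET_SENTINEL` and consults `__pydantic_fields_set__` so that a nullable field explicitly set to null is still emitted.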
orq_ai_sdk/models/getallpromptsop.py

@@ -1,6 +1,30 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .audiocontentpartschema import (
+    AudioContentPartSchema,
+    AudioContentPartSchemaTypedDict,
+)
+from .filecontentpartschema import FileContentPartSchema, FileContentPartSchemaTypedDict
+from .imagecontentpartschema import (
+    ImageContentPartSchema,
+    ImageContentPartSchemaTypedDict,
+)
+from .reasoningpartschema import ReasoningPartSchema, ReasoningPartSchemaTypedDict
+from .redactedreasoningpartschema import (
+    RedactedReasoningPartSchema,
+    RedactedReasoningPartSchemaTypedDict,
+)
+from .refusalpartschema import RefusalPartSchema, RefusalPartSchemaTypedDict
+from .textcontentpartschema import TextContentPartSchema, TextContentPartSchemaTypedDict
+from .thinkingconfigdisabledschema import (
+    ThinkingConfigDisabledSchema,
+    ThinkingConfigDisabledSchemaTypedDict,
+)
+from .thinkingconfigenabledschema import (
+    ThinkingConfigEnabledSchema,
+    ThinkingConfigEnabledSchemaTypedDict,
+)
 from orq_ai_sdk.types import (
     BaseModel,
     Nullable,
@@ -12,7 +36,13 @@ from orq_ai_sdk.utils import FieldMetadata, QueryParamMetadata, get_discriminato
 import pydantic
 from pydantic import Discriminator, Tag, model_serializer
 from typing import Any, Dict, List, Literal, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+from typing_extensions import (
+    Annotated,
+    NotRequired,
+    TypeAliasType,
+    TypedDict,
+    deprecated,
+)
 
 
 class GetAllPromptsRequestTypedDict(TypedDict):
@@ -43,6 +73,22 @@ class GetAllPromptsRequest(BaseModel):
     ] = None
     r"""A cursor for use in pagination. `ending_before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 20 objects, starting with `01JJ1HDHN79XAS7A01WB3HYSDB`, your subsequent call can include `before=01JJ1HDHN79XAS7A01WB3HYSDB` in order to fetch the previous page of the list."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["limit", "starting_after", "ending_before"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 GetAllPromptsObject = Literal["list",]
 
@@ -58,6 +104,7 @@ GetAllPromptsModelType = Literal[
     "tts",
     "stt",
     "rerank",
+    "ocr",
     "moderation",
     "vision",
 ]
@@ -98,39 +145,43 @@ GetAllPromptsResponseFormat4 = Literal[
 ]
 
 
-GetAllPromptsResponseFormatPromptsResponseType = Literal["text",]
+GetAllPromptsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType = Literal[
+    "text",
+]
 
 
 class GetAllPromptsResponseFormat3TypedDict(TypedDict):
-    type: GetAllPromptsResponseFormatPromptsResponseType
+    type: GetAllPromptsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
 
 
 class GetAllPromptsResponseFormat3(BaseModel):
-    type: GetAllPromptsResponseFormatPromptsResponseType
+    type: GetAllPromptsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
 
 
-GetAllPromptsResponseFormatPromptsType = Literal["json_object",]
+GetAllPromptsResponseFormatPromptsResponse200ApplicationJSONType = Literal[
+    "json_object",
+]
 
 
 class GetAllPromptsResponseFormat2TypedDict(TypedDict):
-    type: GetAllPromptsResponseFormatPromptsType
+    type: GetAllPromptsResponseFormatPromptsResponse200ApplicationJSONType
 
 
 class GetAllPromptsResponseFormat2(BaseModel):
-    type: GetAllPromptsResponseFormatPromptsType
+    type: GetAllPromptsResponseFormatPromptsResponse200ApplicationJSONType
 
 
-GetAllPromptsResponseFormatType = Literal["json_schema",]
+GetAllPromptsResponseFormatPromptsResponse200Type = Literal["json_schema",]
 
 
-class GetAllPromptsResponseFormatJSONSchemaTypedDict(TypedDict):
+class GetAllPromptsResponseFormatPromptsResponseJSONSchemaTypedDict(TypedDict):
     name: str
     schema_: Dict[str, Any]
     description: NotRequired[str]
     strict: NotRequired[bool]
 
 
-class GetAllPromptsResponseFormatJSONSchema(BaseModel):
+class GetAllPromptsResponseFormatPromptsResponseJSONSchema(BaseModel):
     name: str
 
     schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
@@ -139,23 +190,55 @@ class GetAllPromptsResponseFormatJSONSchema(BaseModel):
 
     strict: Optional[bool] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class GetAllPromptsResponseFormat1TypedDict(TypedDict):
-    type: GetAllPromptsResponseFormatType
-    json_schema: GetAllPromptsResponseFormatJSONSchemaTypedDict
+    type: GetAllPromptsResponseFormatPromptsResponse200Type
+    json_schema: GetAllPromptsResponseFormatPromptsResponseJSONSchemaTypedDict
     display_name: NotRequired[str]
 
 
 class GetAllPromptsResponseFormat1(BaseModel):
-    type: GetAllPromptsResponseFormatType
+    type: GetAllPromptsResponseFormatPromptsResponse200Type
 
-    json_schema: GetAllPromptsResponseFormatJSONSchema
+    json_schema: GetAllPromptsResponseFormatPromptsResponseJSONSchema
 
     display_name: Optional[str] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
 
-GetAllPromptsResponseFormatTypedDict = TypeAliasType(
-    "GetAllPromptsResponseFormatTypedDict",
+        return m
+
+
+GetAllPromptsPromptsResponseFormatTypedDict = TypeAliasType(
+    "GetAllPromptsPromptsResponseFormatTypedDict",
     Union[
         GetAllPromptsResponseFormat2TypedDict,
         GetAllPromptsResponseFormat3TypedDict,
@@ -175,8 +258,8 @@ Important: when using JSON mode, you must also instruct the model to produce JSO
 """
 
 
-GetAllPromptsResponseFormat = TypeAliasType(
-    "GetAllPromptsResponseFormat",
+GetAllPromptsPromptsResponseFormat = TypeAliasType(
+    "GetAllPromptsPromptsResponseFormat",
     Union[
         GetAllPromptsResponseFormat2,
         GetAllPromptsResponseFormat3,
@@ -210,7 +293,7 @@ GetAllPromptsEncodingFormat = Literal[
 r"""The format to return the embeddings"""
 
 
-GetAllPromptsReasoningEffort = Literal[
+GetAllPromptsPromptsReasoningEffort = Literal[
     "none",
     "disable",
     "minimal",
@@ -263,7 +346,7 @@ class GetAllPromptsModelParametersTypedDict(TypedDict):
     r"""Only supported on `image` models."""
     style: NotRequired[str]
     r"""Only supported on `image` models."""
-    response_format: NotRequired[Nullable[GetAllPromptsResponseFormatTypedDict]]
+    response_format: NotRequired[Nullable[GetAllPromptsPromptsResponseFormatTypedDict]]
     r"""An object specifying the format that the model must output.
 
     Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema
@@ -276,7 +359,7 @@ class GetAllPromptsModelParametersTypedDict(TypedDict):
     r"""The version of photoReal to use. Must be v1 or v2. Only available for `leonardoai` provider"""
     encoding_format: NotRequired[GetAllPromptsEncodingFormat]
     r"""The format to return the embeddings"""
-    reasoning_effort: NotRequired[GetAllPromptsReasoningEffort]
+    reasoning_effort: NotRequired[GetAllPromptsPromptsReasoningEffort]
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
     budget_tokens: NotRequired[float]
     r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""
@@ -332,7 +415,7 @@ class GetAllPromptsModelParameters(BaseModel):
     r"""Only supported on `image` models."""
 
     response_format: Annotated[
-        OptionalNullable[GetAllPromptsResponseFormat],
+        OptionalNullable[GetAllPromptsPromptsResponseFormat],
         pydantic.Field(alias="responseFormat"),
     ] = UNSET
     r"""An object specifying the format that the model must output.
@@ -354,7 +437,8 @@ class GetAllPromptsModelParameters(BaseModel):
     r"""The format to return the embeddings"""
 
     reasoning_effort: Annotated[
-        Optional[GetAllPromptsReasoningEffort], pydantic.Field(alias="reasoningEffort")
+        Optional[GetAllPromptsPromptsReasoningEffort],
+        pydantic.Field(alias="reasoningEffort"),
     ] = None
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
 
@@ -373,51 +457,48 @@ class GetAllPromptsModelParameters(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "temperature",
-            "maxTokens",
-            "topK",
-            "topP",
-            "frequencyPenalty",
-            "presencePenalty",
-            "numImages",
-            "seed",
-            "format",
-            "dimensions",
-            "quality",
-            "style",
-            "responseFormat",
-            "photoRealVersion",
-            "encoding_format",
-            "reasoningEffort",
-            "budgetTokens",
-            "verbosity",
-            "thinkingLevel",
-        ]
-        nullable_fields = ["responseFormat"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "temperature",
+                "maxTokens",
+                "topK",
+                "topP",
+                "frequencyPenalty",
+                "presencePenalty",
+                "numImages",
+                "seed",
+                "format",
+                "dimensions",
+                "quality",
+                "style",
+                "responseFormat",
+                "photoRealVersion",
+                "encoding_format",
+                "reasoningEffort",
+                "budgetTokens",
+                "verbosity",
+                "thinkingLevel",
+            ]
+        )
+        nullable_fields = set(["responseFormat"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -493,6 +574,22 @@ class GetAllPrompts2File(BaseModel):
     filename: Optional[str] = None
     r"""The name of the file, used when passing the file to the model as a string."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["file_data", "uri", "mimeType", "filename"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 
 class GetAllPrompts23TypedDict(TypedDict):
@@ -529,6 +626,22 @@ class GetAllPrompts2ImageURL(BaseModel):
     detail: Optional[str] = None
     r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "detail"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class GetAllPrompts22TypedDict(TypedDict):
     r"""The image part of the prompt message. Only supported with vision models."""
@@ -591,7 +704,7 @@ GetAllPromptsContent = TypeAliasType(
 r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
 
 
-GetAllPromptsPromptsType = Literal["function",]
+GetAllPromptsPromptsResponseType = Literal["function",]
 
 
 class GetAllPromptsFunctionTypedDict(TypedDict):
@@ -608,14 +721,14 @@ class GetAllPromptsFunction(BaseModel):
 
 
 class GetAllPromptsToolCallsTypedDict(TypedDict):
-    type: GetAllPromptsPromptsType
+    type: GetAllPromptsPromptsResponseType
     function: GetAllPromptsFunctionTypedDict
     id: NotRequired[str]
     index: NotRequired[float]
 
 
 class GetAllPromptsToolCalls(BaseModel):
-    type: GetAllPromptsPromptsType
+    type: GetAllPromptsPromptsResponseType
 
     function: GetAllPromptsFunction
 
@@ -623,6 +736,22 @@ class GetAllPromptsToolCalls(BaseModel):
     id: Optional[str] = None
     index: Optional[float] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "index"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class GetAllPromptsMessagesTypedDict(TypedDict):
     role: GetAllPromptsRole
@@ -646,61 +775,62 @@ class GetAllPromptsMessages(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["tool_calls", "tool_call_id"]
-        nullable_fields = ["content", "tool_call_id"]
-        null_default_fields = []
-
+        optional_fields = set(["tool_calls", "tool_call_id"])
+        nullable_fields = set(["content", "tool_call_id"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class GetAllPromptsPromptConfigTypedDict(TypedDict):
-    r"""A list of messages compatible with the openAI schema"""
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
 
     messages: List[GetAllPromptsMessagesTypedDict]
     stream: NotRequired[bool]
-    model: NotRequired[str]
+    model: NotRequired[Nullable[str]]
     model_db_id: NotRequired[Nullable[str]]
     r"""The id of the resource"""
     model_type: NotRequired[Nullable[GetAllPromptsModelType]]
     r"""The modality of the model"""
     model_parameters: NotRequired[GetAllPromptsModelParametersTypedDict]
     r"""Model Parameters: Not all parameters apply to every model"""
-    provider: NotRequired[GetAllPromptsProvider]
+    provider: NotRequired[Nullable[GetAllPromptsProvider]]
    integration_id: NotRequired[Nullable[str]]
     r"""The ID of the integration to use"""
     version: NotRequired[str]
 
 
+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class GetAllPromptsPromptConfig(BaseModel):
-    r"""A list of messages compatible with the openAI schema"""
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
 
     messages: List[GetAllPromptsMessages]
 
     stream: Optional[bool] = None
 
-    model: Optional[str] = None
+    model: OptionalNullable[str] = UNSET
 
     model_db_id: OptionalNullable[str] = UNSET
     r"""The id of the resource"""
@@ -711,7 +841,7 @@ class GetAllPromptsPromptConfig(BaseModel):
     model_parameters: Optional[GetAllPromptsModelParameters] = None
     r"""Model Parameters: Not all parameters apply to every model"""
 
-    provider: Optional[GetAllPromptsProvider] = None
+    provider: OptionalNullable[GetAllPromptsProvider] = UNSET
 
     integration_id: OptionalNullable[str] = UNSET
     r"""The ID of the integration to use"""
@@ -720,202 +850,1532 @@ class GetAllPromptsPromptConfig(BaseModel):
720
850
 
721
851
  @model_serializer(mode="wrap")
722
852
  def serialize_model(self, handler):
723
- optional_fields = [
724
- "stream",
725
- "model",
726
- "model_db_id",
727
- "model_type",
728
- "model_parameters",
729
- "provider",
730
- "integration_id",
731
- "version",
732
- ]
733
- nullable_fields = ["model_db_id", "model_type", "integration_id"]
734
- null_default_fields = []
735
-
853
+ optional_fields = set(
854
+ [
855
+ "stream",
856
+ "model",
857
+ "model_db_id",
858
+ "model_type",
859
+ "model_parameters",
860
+ "provider",
861
+ "integration_id",
862
+ "version",
863
+ ]
864
+ )
865
+ nullable_fields = set(
866
+ ["model", "model_db_id", "model_type", "provider", "integration_id"]
867
+ )
736
868
  serialized = handler(self)
737
-
738
869
  m = {}
739
870
 
740
871
  for n, f in type(self).model_fields.items():
741
872
  k = f.alias or n
742
873
  val = serialized.get(k)
743
- serialized.pop(k, None)
744
-
745
- optional_nullable = k in optional_fields and k in nullable_fields
746
- is_set = (
747
- self.__pydantic_fields_set__.intersection({n})
748
- or k in null_default_fields
749
- ) # pylint: disable=no-member
750
-
751
- if val is not None and val != UNSET_SENTINEL:
752
- m[k] = val
753
- elif val != UNSET_SENTINEL and (
754
- not k in optional_fields or (optional_nullable and is_set)
755
- ):
756
- m[k] = val
874
+ is_nullable_and_explicitly_set = (
875
+ k in nullable_fields
876
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
877
+ )
878
+
879
+ if val != UNSET_SENTINEL:
880
+ if (
881
+ val is not None
882
+ or k not in optional_fields
883
+ or is_nullable_and_explicitly_set
884
+ ):
885
+ m[k] = val
757
886
 
758
887
  return m
759
888
 
760
889
 
761
- GetAllPromptsUseCases = Literal[
762
- "Agents simulations",
763
- "Agents",
764
- "API interaction",
765
- "Autonomous Agents",
766
- "Chatbots",
767
- "Classification",
768
- "Code understanding",
769
- "Code writing",
770
- "Conversation",
771
- "Documents QA",
772
- "Evaluation",
773
- "Extraction",
774
- "Multi-modal",
775
- "Self-checking",
776
- "Sentiment analysis",
777
- "SQL",
778
- "Summarization",
779
- "Tagging",
780
- "Translation (document)",
781
- "Translation (sentences)",
890
+ GetAllPromptsVoice = Literal[
891
+ "alloy",
892
+ "echo",
893
+ "fable",
894
+ "onyx",
895
+ "nova",
896
+ "shimmer",
782
897
  ]
898
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
783
899
 
784
900
 
785
- GetAllPromptsLanguage = Literal[
786
- "Chinese",
787
- "Dutch",
788
- "English",
789
- "French",
790
- "German",
791
- "Russian",
792
- "Spanish",
901
+ GetAllPromptsPromptsFormat = Literal[
902
+ "wav",
903
+ "mp3",
904
+ "flac",
905
+ "opus",
906
+ "pcm16",
793
907
  ]
794
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
908
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
795
909
 
796
910
 
797
- class GetAllPromptsMetadataTypedDict(TypedDict):
798
- use_cases: NotRequired[List[GetAllPromptsUseCases]]
799
- r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
800
- language: NotRequired[Nullable[GetAllPromptsLanguage]]
801
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
911
+ class GetAllPromptsAudioTypedDict(TypedDict):
912
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
802
913
 
914
+ voice: GetAllPromptsVoice
915
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
916
+ format_: GetAllPromptsPromptsFormat
917
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
803
918
 
804
- class GetAllPromptsMetadata(BaseModel):
805
- use_cases: Optional[List[GetAllPromptsUseCases]] = None
806
- r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
807
919
 
808
- language: OptionalNullable[GetAllPromptsLanguage] = UNSET
809
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
920
+ class GetAllPromptsAudio(BaseModel):
921
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
922
+
923
+ voice: GetAllPromptsVoice
924
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
925
+
926
+ format_: Annotated[GetAllPromptsPromptsFormat, pydantic.Field(alias="format")]
927
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
928
+
929
+
930
+ GetAllPromptsResponseFormatPromptsResponseType = Literal["json_schema",]
931
+
932
+
933
+ class GetAllPromptsResponseFormatJSONSchemaTypedDict(TypedDict):
934
+ name: str
935
+ r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
936
+ description: NotRequired[str]
937
+ r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
938
+ schema_: NotRequired[Any]
939
+ r"""The schema for the response format, described as a JSON Schema object."""
940
+ strict: NotRequired[bool]
941
+ r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
942
+
943
+
944
+ class GetAllPromptsResponseFormatJSONSchema(BaseModel):
945
+ name: str
946
+ r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
947
+
948
+ description: Optional[str] = None
949
+ r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
950
+
951
+ schema_: Annotated[Optional[Any], pydantic.Field(alias="schema")] = None
952
+ r"""The schema for the response format, described as a JSON Schema object."""
953
+
954
+ strict: Optional[bool] = False
955
+ r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
810
956
 
811
957
  @model_serializer(mode="wrap")
812
958
  def serialize_model(self, handler):
813
- optional_fields = ["use_cases", "language"]
814
- nullable_fields = ["language"]
815
- null_default_fields = []
816
-
959
+ optional_fields = set(["description", "schema", "strict"])
817
960
  serialized = handler(self)
818
-
819
961
  m = {}
820
962
 
821
963
  for n, f in type(self).model_fields.items():
822
964
  k = f.alias or n
823
965
  val = serialized.get(k)
824
- serialized.pop(k, None)
825
-
826
- optional_nullable = k in optional_fields and k in nullable_fields
827
- is_set = (
828
- self.__pydantic_fields_set__.intersection({n})
829
- or k in null_default_fields
830
- ) # pylint: disable=no-member
831
966
 
832
- if val is not None and val != UNSET_SENTINEL:
833
- m[k] = val
834
- elif val != UNSET_SENTINEL and (
835
- not k in optional_fields or (optional_nullable and is_set)
836
- ):
837
- m[k] = val
967
+ if val != UNSET_SENTINEL:
968
+ if val is not None or k not in optional_fields:
969
+ m[k] = val
838
970
 
839
971
  return m
840
972
 
841
973
 
842
- class GetAllPromptsPromptTypedDict(TypedDict):
843
- r"""A prompt entity with configuration, metadata, and versioning."""
974
+ class GetAllPromptsResponseFormatPromptsJSONSchemaTypedDict(TypedDict):
975
+ r"""
844
976
 
845
- id: str
846
- type: GetAllPromptsType
847
- owner: str
848
- domain_id: str
849
- created: str
850
- updated: str
851
- display_name: str
852
- r"""The prompt’s name, meant to be displayable in the UI."""
853
- prompt_config: GetAllPromptsPromptConfigTypedDict
854
- r"""A list of messages compatible with the openAI schema"""
855
- created_by_id: NotRequired[Nullable[str]]
856
- updated_by_id: NotRequired[Nullable[str]]
857
- description: NotRequired[Nullable[str]]
858
- r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
859
- metadata: NotRequired[GetAllPromptsMetadataTypedDict]
977
+ JSON Schema response format. Used to generate structured JSON responses
978
+ """
860
979
 
980
+ type: GetAllPromptsResponseFormatPromptsResponseType
981
+ json_schema: GetAllPromptsResponseFormatJSONSchemaTypedDict
861
982
 
862
- class GetAllPromptsPrompt(BaseModel):
863
- r"""A prompt entity with configuration, metadata, and versioning."""
864
983
 
865
- id: Annotated[str, pydantic.Field(alias="_id")]
984
+ class GetAllPromptsResponseFormatPromptsJSONSchema(BaseModel):
985
+ r"""
866
986
 
867
- type: GetAllPromptsType
987
+ JSON Schema response format. Used to generate structured JSON responses
988
+ """
868
989
 
869
- owner: str
990
+ type: GetAllPromptsResponseFormatPromptsResponseType
870
991
 
871
- domain_id: str
992
+ json_schema: GetAllPromptsResponseFormatJSONSchema
872
993
 
873
- created: str
874
994
 
875
- updated: str
995
+ GetAllPromptsResponseFormatPromptsType = Literal["json_object",]
876
996
 
877
- display_name: str
878
- r"""The prompt’s name, meant to be displayable in the UI."""
879
997
 
880
- prompt_config: GetAllPromptsPromptConfig
881
- r"""A list of messages compatible with the openAI schema"""
998
+ class GetAllPromptsResponseFormatJSONObjectTypedDict(TypedDict):
999
+ r"""
882
1000
 
883
- created_by_id: OptionalNullable[str] = UNSET
1001
+ JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
1002
+ """
884
1003
 
885
- updated_by_id: OptionalNullable[str] = UNSET
1004
+ type: GetAllPromptsResponseFormatPromptsType
886
1005
 
887
- description: OptionalNullable[str] = UNSET
888
- r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
889
1006
 
890
- metadata: Optional[GetAllPromptsMetadata] = None
1007
+ class GetAllPromptsResponseFormatJSONObject(BaseModel):
1008
+ r"""
891
1009
 
892
- @model_serializer(mode="wrap")
893
- def serialize_model(self, handler):
894
- optional_fields = ["created_by_id", "updated_by_id", "description", "metadata"]
895
- nullable_fields = ["created_by_id", "updated_by_id", "description"]
896
- null_default_fields = []
1010
+ JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
1011
+ """
897
1012
 
898
- serialized = handler(self)
1013
+ type: GetAllPromptsResponseFormatPromptsType
1014
+
1015
+
1016
+ GetAllPromptsResponseFormatType = Literal["text",]
899
1017
 
1018
+
1019
+ class GetAllPromptsResponseFormatTextTypedDict(TypedDict):
1020
+ r"""
1021
+
1022
+ Default response format. Used to generate text responses
1023
+ """
1024
+
1025
+ type: GetAllPromptsResponseFormatType
1026
+
1027
+
1028
+ class GetAllPromptsResponseFormatText(BaseModel):
1029
+ r"""
1030
+
1031
+ Default response format. Used to generate text responses
1032
+ """
1033
+
1034
+ type: GetAllPromptsResponseFormatType
1035
+
1036
+
1037
+ GetAllPromptsResponseFormatTypedDict = TypeAliasType(
1038
+ "GetAllPromptsResponseFormatTypedDict",
1039
+ Union[
1040
+ GetAllPromptsResponseFormatTextTypedDict,
1041
+ GetAllPromptsResponseFormatJSONObjectTypedDict,
1042
+ GetAllPromptsResponseFormatPromptsJSONSchemaTypedDict,
1043
+ ],
1044
+ )
1045
+ r"""An object specifying the format that the model must output"""
1046
+
1047
+
1048
+ GetAllPromptsResponseFormat = Annotated[
1049
+ Union[
1050
+ Annotated[GetAllPromptsResponseFormatText, Tag("text")],
1051
+ Annotated[GetAllPromptsResponseFormatJSONObject, Tag("json_object")],
1052
+ Annotated[GetAllPromptsResponseFormatPromptsJSONSchema, Tag("json_schema")],
1053
+ ],
1054
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1055
+ ]
1056
+ r"""An object specifying the format that the model must output"""
1057
+
1058
+
1059
+ GetAllPromptsReasoningEffort = Literal[
1060
+ "none",
1061
+ "minimal",
1062
+ "low",
1063
+ "medium",
1064
+ "high",
1065
+ "xhigh",
1066
+ ]
1067
+ r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
1068
+
1069
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
1070
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
1071
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1072
+ - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
1073
+
1074
+ Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
1075
+ """
1076
+
1077
+
1078
+ GetAllPromptsStopTypedDict = TypeAliasType(
1079
+ "GetAllPromptsStopTypedDict", Union[str, List[str]]
1080
+ )
1081
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
1082
+
1083
+
1084
+ GetAllPromptsStop = TypeAliasType("GetAllPromptsStop", Union[str, List[str]])
1085
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
1086
+
1087
+
1088
+ class GetAllPromptsStreamOptionsTypedDict(TypedDict):
1089
+ r"""Options for streaming response. Only set this when you set stream: true."""
1090
+
1091
+ include_usage: NotRequired[bool]
1092
+ r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
1093
+
1094
+
1095
+ class GetAllPromptsStreamOptions(BaseModel):
1096
+ r"""Options for streaming response. Only set this when you set stream: true."""
1097
+
1098
+ include_usage: Optional[bool] = None
1099
+ r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
1100
+
1101
+ @model_serializer(mode="wrap")
1102
+ def serialize_model(self, handler):
1103
+ optional_fields = set(["include_usage"])
1104
+ serialized = handler(self)
1105
+ m = {}
1106
+
1107
+ for n, f in type(self).model_fields.items():
1108
+ k = f.alias or n
1109
+ val = serialized.get(k)
1110
+
1111
+ if val != UNSET_SENTINEL:
1112
+ if val is not None or k not in optional_fields:
1113
+ m[k] = val
1114
+
1115
+ return m
1116
+
1117
+
1118
+ GetAllPromptsThinkingTypedDict = TypeAliasType(
1119
+ "GetAllPromptsThinkingTypedDict",
1120
+ Union[ThinkingConfigDisabledSchemaTypedDict, ThinkingConfigEnabledSchemaTypedDict],
1121
+ )
1122
+
1123
+
1124
+ GetAllPromptsThinking = Annotated[
1125
+ Union[
1126
+ Annotated[ThinkingConfigDisabledSchema, Tag("disabled")],
1127
+ Annotated[ThinkingConfigEnabledSchema, Tag("enabled")],
1128
+ ],
1129
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1130
+ ]
1131
+
1132
+
1133
+ GetAllPromptsToolChoiceType = Literal["function",]
1134
+ r"""The type of the tool. Currently, only function is supported."""
1135
+
1136
+
1137
+ class GetAllPromptsToolChoiceFunctionTypedDict(TypedDict):
1138
+ name: str
1139
+ r"""The name of the function to call."""
1140
+
1141
+
1142
+ class GetAllPromptsToolChoiceFunction(BaseModel):
1143
+ name: str
1144
+ r"""The name of the function to call."""
1145
+
1146
+
1147
+ class GetAllPromptsToolChoice2TypedDict(TypedDict):
1148
+ function: GetAllPromptsToolChoiceFunctionTypedDict
1149
+ type: NotRequired[GetAllPromptsToolChoiceType]
1150
+ r"""The type of the tool. Currently, only function is supported."""
1151
+
1152
+
1153
+ class GetAllPromptsToolChoice2(BaseModel):
1154
+ function: GetAllPromptsToolChoiceFunction
1155
+
1156
+ type: Optional[GetAllPromptsToolChoiceType] = None
1157
+ r"""The type of the tool. Currently, only function is supported."""
1158
+
1159
+ @model_serializer(mode="wrap")
1160
+ def serialize_model(self, handler):
1161
+ optional_fields = set(["type"])
1162
+ serialized = handler(self)
1163
+ m = {}
1164
+
1165
+ for n, f in type(self).model_fields.items():
1166
+ k = f.alias or n
1167
+ val = serialized.get(k)
1168
+
1169
+ if val != UNSET_SENTINEL:
1170
+ if val is not None or k not in optional_fields:
1171
+ m[k] = val
1172
+
1173
+ return m
1174
+
1175
+
1176
+ GetAllPromptsToolChoice1 = Literal[
1177
+ "none",
1178
+ "auto",
1179
+ "required",
1180
+ ]
1181
+
1182
+
1183
+ GetAllPromptsToolChoiceTypedDict = TypeAliasType(
1184
+ "GetAllPromptsToolChoiceTypedDict",
1185
+ Union[GetAllPromptsToolChoice2TypedDict, GetAllPromptsToolChoice1],
1186
+ )
1187
+ r"""Controls which (if any) tool is called by the model."""
1188
+
1189
+
1190
+ GetAllPromptsToolChoice = TypeAliasType(
1191
+ "GetAllPromptsToolChoice", Union[GetAllPromptsToolChoice2, GetAllPromptsToolChoice1]
1192
+ )
1193
+ r"""Controls which (if any) tool is called by the model."""
1194
+
1195
+
1196
+ GetAllPromptsModalities = Literal[
1197
+ "text",
1198
+ "audio",
1199
+ ]
1200
+
1201
+
1202
+ GetAllPromptsID1 = Literal[
1203
+ "orq_pii_detection",
1204
+ "orq_sexual_moderation",
1205
+ "orq_harmful_moderation",
1206
+ ]
1207
+ r"""The key of the guardrail."""
1208
+
1209
+
1210
+ GetAllPromptsIDTypedDict = TypeAliasType(
1211
+ "GetAllPromptsIDTypedDict", Union[GetAllPromptsID1, str]
1212
+ )
1213
+
1214
+
1215
+ GetAllPromptsID = TypeAliasType("GetAllPromptsID", Union[GetAllPromptsID1, str])
1216
+
1217
+
1218
+ GetAllPromptsExecuteOn = Literal[
1219
+ "input",
1220
+ "output",
1221
+ ]
1222
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1223
+
1224
+
1225
+ class GetAllPromptsGuardrailsTypedDict(TypedDict):
1226
+ id: GetAllPromptsIDTypedDict
1227
+ execute_on: GetAllPromptsExecuteOn
1228
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1229
+
1230
+
1231
+ class GetAllPromptsGuardrails(BaseModel):
1232
+ id: GetAllPromptsID
1233
+
1234
+ execute_on: GetAllPromptsExecuteOn
1235
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1236
+
1237
+
1238
+ class GetAllPromptsFallbacksTypedDict(TypedDict):
1239
+ model: str
1240
+ r"""Fallback model identifier"""
1241
+
1242
+
1243
+ class GetAllPromptsFallbacks(BaseModel):
1244
+ model: str
1245
+ r"""Fallback model identifier"""
1246
+
1247
+
1248
+ class GetAllPromptsRetryTypedDict(TypedDict):
1249
+ r"""Retry configuration for the request"""
1250
+
1251
+ count: NotRequired[float]
1252
+ r"""Number of retry attempts (1-5)"""
1253
+ on_codes: NotRequired[List[float]]
1254
+ r"""HTTP status codes that trigger retry logic"""
1255
+
1256
+
1257
+ class GetAllPromptsRetry(BaseModel):
1258
+ r"""Retry configuration for the request"""
1259
+
1260
+ count: Optional[float] = 3
1261
+ r"""Number of retry attempts (1-5)"""
1262
+
1263
+ on_codes: Optional[List[float]] = None
1264
+ r"""HTTP status codes that trigger retry logic"""
1265
+
1266
+ @model_serializer(mode="wrap")
1267
+ def serialize_model(self, handler):
1268
+ optional_fields = set(["count", "on_codes"])
1269
+ serialized = handler(self)
1270
+ m = {}
1271
+
1272
+ for n, f in type(self).model_fields.items():
1273
+ k = f.alias or n
1274
+ val = serialized.get(k)
1275
+
1276
+ if val != UNSET_SENTINEL:
1277
+ if val is not None or k not in optional_fields:
1278
+ m[k] = val
1279
+
1280
+ return m
1281
+
1282
+
1283
+ GetAllPromptsPromptsType = Literal["exact_match",]
1284
+
1285
+
1286
+ class GetAllPromptsCacheTypedDict(TypedDict):
1287
+ r"""Cache configuration for the request."""
1288
+
1289
+ type: GetAllPromptsPromptsType
1290
+ ttl: NotRequired[float]
1291
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
1292
+
1293
+
1294
+ class GetAllPromptsCache(BaseModel):
1295
+ r"""Cache configuration for the request."""
1296
+
1297
+ type: GetAllPromptsPromptsType
1298
+
1299
+ ttl: Optional[float] = 1800
1300
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
1301
+
1302
+ @model_serializer(mode="wrap")
1303
+ def serialize_model(self, handler):
1304
+ optional_fields = set(["ttl"])
1305
+ serialized = handler(self)
1306
+ m = {}
1307
+
1308
+ for n, f in type(self).model_fields.items():
1309
+ k = f.alias or n
1310
+ val = serialized.get(k)
1311
+
1312
+ if val != UNSET_SENTINEL:
1313
+ if val is not None or k not in optional_fields:
1314
+ m[k] = val
1315
+
1316
+ return m
1317
+
1318
+
1319
+ GetAllPromptsLoadBalancerType = Literal["weight_based",]
1320
+
1321
+
1322
+ class GetAllPromptsLoadBalancerModelsTypedDict(TypedDict):
1323
+ model: str
1324
+ r"""Model identifier for load balancing"""
1325
+ weight: NotRequired[float]
1326
+ r"""Weight assigned to this model for load balancing"""
1327
+
1328
+
1329
+ class GetAllPromptsLoadBalancerModels(BaseModel):
1330
+ model: str
1331
+ r"""Model identifier for load balancing"""
1332
+
1333
+ weight: Optional[float] = 0.5
1334
+ r"""Weight assigned to this model for load balancing"""
1335
+
1336
+ @model_serializer(mode="wrap")
1337
+ def serialize_model(self, handler):
1338
+ optional_fields = set(["weight"])
1339
+ serialized = handler(self)
1340
+ m = {}
1341
+
1342
+ for n, f in type(self).model_fields.items():
1343
+ k = f.alias or n
1344
+ val = serialized.get(k)
1345
+
1346
+ if val != UNSET_SENTINEL:
1347
+ if val is not None or k not in optional_fields:
1348
+ m[k] = val
1349
+
1350
+ return m
1351
+
1352
+
1353
+ class GetAllPromptsLoadBalancer1TypedDict(TypedDict):
1354
+ type: GetAllPromptsLoadBalancerType
1355
+ models: List[GetAllPromptsLoadBalancerModelsTypedDict]
1356
+
1357
+
1358
+ class GetAllPromptsLoadBalancer1(BaseModel):
1359
+ type: GetAllPromptsLoadBalancerType
1360
+
1361
+ models: List[GetAllPromptsLoadBalancerModels]
1362
+
1363
+
1364
+ GetAllPromptsLoadBalancerTypedDict = GetAllPromptsLoadBalancer1TypedDict
1365
+ r"""Load balancer configuration for the request."""
1366
+
1367
+
1368
+ GetAllPromptsLoadBalancer = GetAllPromptsLoadBalancer1
1369
+ r"""Load balancer configuration for the request."""
1370
+
1371
+
1372
+ class GetAllPromptsTimeoutTypedDict(TypedDict):
1373
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1374
+
1375
+ call_timeout: float
1376
+ r"""Timeout value in milliseconds"""
1377
+
1378
+
1379
+ class GetAllPromptsTimeout(BaseModel):
1380
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1381
+
1382
+ call_timeout: float
1383
+ r"""Timeout value in milliseconds"""
1384
+
1385
+
1386
+ GetAllPromptsMessagesPromptsResponse200Role = Literal["tool",]
1387
+ r"""The role of the messages author, in this case tool."""
1388
+
1389
+
1390
+ GetAllPromptsContentPromptsResponse2002TypedDict = TextContentPartSchemaTypedDict
1391
+
1392
+
1393
+ GetAllPromptsContentPromptsResponse2002 = TextContentPartSchema
1394
+
1395
+
1396
+ GetAllPromptsMessagesPromptsResponse200ContentTypedDict = TypeAliasType(
1397
+ "GetAllPromptsMessagesPromptsResponse200ContentTypedDict",
1398
+ Union[str, List[GetAllPromptsContentPromptsResponse2002TypedDict]],
1399
+ )
1400
+ r"""The contents of the tool message."""
1401
+
1402
+
1403
+ GetAllPromptsMessagesPromptsResponse200Content = TypeAliasType(
1404
+ "GetAllPromptsMessagesPromptsResponse200Content",
1405
+ Union[str, List[GetAllPromptsContentPromptsResponse2002]],
1406
+ )
1407
+ r"""The contents of the tool message."""
1408
+
1409
+
1410
+ GetAllPromptsMessagesPromptsType = Literal["ephemeral",]
1411
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1412
+
1413
+
1414
+ GetAllPromptsMessagesTTL = Literal[
1415
+ "5m",
1416
+ "1h",
1417
+ ]
1418
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1419
+
1420
+ - `5m`: 5 minutes
1421
+ - `1h`: 1 hour
1422
+
1423
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1424
+ """
1425
+
1426
+
1427
+ class GetAllPromptsMessagesCacheControlTypedDict(TypedDict):
1428
+ type: GetAllPromptsMessagesPromptsType
1429
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1430
+ ttl: NotRequired[GetAllPromptsMessagesTTL]
1431
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1432
+
1433
+ - `5m`: 5 minutes
1434
+ - `1h`: 1 hour
1435
+
1436
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1437
+ """
1438
+
1439
+
1440
+ class GetAllPromptsMessagesCacheControl(BaseModel):
1441
+ type: GetAllPromptsMessagesPromptsType
1442
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1443
+
1444
+ ttl: Optional[GetAllPromptsMessagesTTL] = "5m"
1445
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1446
+
1447
+ - `5m`: 5 minutes
1448
+ - `1h`: 1 hour
1449
+
1450
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1451
+ """
1452
+
1453
+ @model_serializer(mode="wrap")
1454
+ def serialize_model(self, handler):
1455
+ optional_fields = set(["ttl"])
1456
+ serialized = handler(self)
1457
+ m = {}
1458
+
1459
+ for n, f in type(self).model_fields.items():
1460
+ k = f.alias or n
1461
+ val = serialized.get(k)
1462
+
1463
+ if val != UNSET_SENTINEL:
1464
+ if val is not None or k not in optional_fields:
1465
+ m[k] = val
1466
+
1467
+ return m
1468
+
1469
+
1470
+ class GetAllPromptsMessagesToolMessageTypedDict(TypedDict):
+     role: GetAllPromptsMessagesPromptsResponse200Role
+     r"""The role of the messages author, in this case `tool`."""
+     content: GetAllPromptsMessagesPromptsResponse200ContentTypedDict
+     r"""The contents of the tool message."""
+     tool_call_id: Nullable[str]
+     r"""Tool call that this message is responding to."""
+     cache_control: NotRequired[GetAllPromptsMessagesCacheControlTypedDict]
+
+
+ class GetAllPromptsMessagesToolMessage(BaseModel):
+     role: GetAllPromptsMessagesPromptsResponse200Role
+     r"""The role of the messages author, in this case `tool`."""
+
+     content: GetAllPromptsMessagesPromptsResponse200Content
+     r"""The contents of the tool message."""
+
+     tool_call_id: Nullable[str]
+     r"""Tool call that this message is responding to."""
+
+     cache_control: Optional[GetAllPromptsMessagesCacheControl] = None
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["cache_control"])
+         nullable_fields = set(["tool_call_id"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+             is_nullable_and_explicitly_set = (
+                 k in nullable_fields
+                 and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+             )
+
+             if val != UNSET_SENTINEL:
+                 if (
+                     val is not None
+                     or k not in optional_fields
+                     or is_nullable_and_explicitly_set
+                 ):
+                     m[k] = val
+
+         return m
+
+
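The `serialize_model` pattern above recurs throughout this file: unset optional fields are dropped, while required-but-nullable fields (here `tool_call_id`) always serialize, even as null. A hedged sketch under the same re-export assumption:

    from orq_ai_sdk.models import GetAllPromptsMessagesToolMessage

    # An explicit None for the required-nullable tool_call_id survives as null;
    # the unset optional cache_control is dropped from the payload.
    msg = GetAllPromptsMessagesToolMessage(role="tool", content="42", tool_call_id=None)
    print(msg.model_dump())
    # expected: {'role': 'tool', 'content': '42', 'tool_call_id': None}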
+ GetAllPromptsContentPromptsResponse2TypedDict = TypeAliasType(
+     "GetAllPromptsContentPromptsResponse2TypedDict",
+     Union[
+         RefusalPartSchemaTypedDict,
+         RedactedReasoningPartSchemaTypedDict,
+         TextContentPartSchemaTypedDict,
+         ReasoningPartSchemaTypedDict,
+     ],
+ )
+
+
+ GetAllPromptsContentPromptsResponse2 = Annotated[
+     Union[
+         Annotated[TextContentPartSchema, Tag("text")],
+         Annotated[RefusalPartSchema, Tag("refusal")],
+         Annotated[ReasoningPartSchema, Tag("reasoning")],
+         Annotated[RedactedReasoningPartSchema, Tag("redacted_reasoning")],
+     ],
+     Discriminator(lambda m: get_discriminator(m, "type", "type")),
+ ]
+
+
+ GetAllPromptsMessagesPromptsResponseContentTypedDict = TypeAliasType(
+     "GetAllPromptsMessagesPromptsResponseContentTypedDict",
+     Union[str, List[GetAllPromptsContentPromptsResponse2TypedDict]],
+ )
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+
+ GetAllPromptsMessagesPromptsResponseContent = TypeAliasType(
+     "GetAllPromptsMessagesPromptsResponseContent",
+     Union[str, List[GetAllPromptsContentPromptsResponse2]],
+ )
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+
+ GetAllPromptsMessagesPromptsResponseRole = Literal["assistant",]
+ r"""The role of the messages author, in this case `assistant`."""
+
+
+ class GetAllPromptsMessagesAudioTypedDict(TypedDict):
+     r"""Data about a previous audio response from the model."""
+
+     id: str
+     r"""Unique identifier for a previous audio response from the model."""
+
+
+ class GetAllPromptsMessagesAudio(BaseModel):
+     r"""Data about a previous audio response from the model."""
+
+     id: str
+     r"""Unique identifier for a previous audio response from the model."""
+
+
+ GetAllPromptsMessagesType = Literal["function",]
+ r"""The type of the tool. Currently, only `function` is supported."""
+
+
+ class GetAllPromptsMessagesFunctionTypedDict(TypedDict):
+     name: NotRequired[str]
+     r"""The name of the function to call."""
+     arguments: NotRequired[str]
+     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
+
+
+ class GetAllPromptsMessagesFunction(BaseModel):
+     name: Optional[str] = None
+     r"""The name of the function to call."""
+
+     arguments: Optional[str] = None
+     r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["name", "arguments"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val != UNSET_SENTINEL:
+                 if val is not None or k not in optional_fields:
+                     m[k] = val
+
+         return m
+
+
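The `arguments` docstring warns that models can emit invalid JSON or parameters outside your schema. A minimal defensive-parsing sketch of the validation it recommends; only the standard library is used, and `parse_arguments` is an illustrative helper, not an SDK function:

    import json
    from typing import Optional

    def parse_arguments(raw: Optional[str]) -> Optional[dict]:
        # Reject the tool call instead of executing on malformed arguments.
        try:
            args = json.loads(raw or "{}")
        except json.JSONDecodeError:
            return None
        return args if isinstance(args, dict) else None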
+ class GetAllPromptsMessagesToolCallsTypedDict(TypedDict):
+     id: str
+     r"""The ID of the tool call."""
+     type: GetAllPromptsMessagesType
+     r"""The type of the tool. Currently, only `function` is supported."""
+     function: GetAllPromptsMessagesFunctionTypedDict
+     thought_signature: NotRequired[str]
+     r"""Encrypted representation of the model's internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
+
+
+ class GetAllPromptsMessagesToolCalls(BaseModel):
+     id: str
+     r"""The ID of the tool call."""
+
+     type: GetAllPromptsMessagesType
+     r"""The type of the tool. Currently, only `function` is supported."""
+
+     function: GetAllPromptsMessagesFunction
+
+     thought_signature: Optional[str] = None
+     r"""Encrypted representation of the model's internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["thought_signature"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val != UNSET_SENTINEL:
+                 if val is not None or k not in optional_fields:
+                     m[k] = val
+
+         return m
+
+
+ class GetAllPromptsMessagesAssistantMessageTypedDict(TypedDict):
+     role: GetAllPromptsMessagesPromptsResponseRole
+     r"""The role of the messages author, in this case `assistant`."""
+     content: NotRequired[Nullable[GetAllPromptsMessagesPromptsResponseContentTypedDict]]
+     r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+     refusal: NotRequired[Nullable[str]]
+     r"""The refusal message by the assistant."""
+     name: NotRequired[str]
+     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+     audio: NotRequired[Nullable[GetAllPromptsMessagesAudioTypedDict]]
+     r"""Data about a previous audio response from the model."""
+     tool_calls: NotRequired[List[GetAllPromptsMessagesToolCallsTypedDict]]
+     r"""The tool calls generated by the model, such as function calls."""
+
+
+ class GetAllPromptsMessagesAssistantMessage(BaseModel):
+     role: GetAllPromptsMessagesPromptsResponseRole
+     r"""The role of the messages author, in this case `assistant`."""
+
+     content: OptionalNullable[GetAllPromptsMessagesPromptsResponseContent] = UNSET
+     r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
+
+     refusal: OptionalNullable[str] = UNSET
+     r"""The refusal message by the assistant."""
+
+     name: Optional[str] = None
+     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+     audio: OptionalNullable[GetAllPromptsMessagesAudio] = UNSET
+     r"""Data about a previous audio response from the model."""
+
+     tool_calls: Optional[List[GetAllPromptsMessagesToolCalls]] = None
+     r"""The tool calls generated by the model, such as function calls."""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
+         nullable_fields = set(["content", "refusal", "audio"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+             is_nullable_and_explicitly_set = (
+                 k in nullable_fields
+                 and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+             )
+
+             if val != UNSET_SENTINEL:
+                 if (
+                     val is not None
+                     or k not in optional_fields
+                     or is_nullable_and_explicitly_set
+                 ):
+                     m[k] = val
+
+         return m
+
+
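A sketch of the shape the assistant-message docstrings describe: when `tool_calls` is present, `content` can stay unset, and the serializer drops UNSET fields entirely. The call ID and function name are hypothetical:

    from orq_ai_sdk.models import (
        GetAllPromptsMessagesAssistantMessage,
        GetAllPromptsMessagesFunction,
        GetAllPromptsMessagesToolCalls,
    )

    call = GetAllPromptsMessagesToolCalls(
        id="call_123",  # hypothetical tool-call ID
        type="function",
        function=GetAllPromptsMessagesFunction(
            name="get_weather",  # hypothetical function
            arguments='{"city": "Paris"}',
        ),
    )
    # content/refusal/audio stay UNSET and are omitted from model_dump().
    msg = GetAllPromptsMessagesAssistantMessage(role="assistant", tool_calls=[call])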
+ GetAllPromptsMessagesPromptsRole = Literal["user",]
+ r"""The role of the messages author, in this case `user`."""
+
+
+ GetAllPrompts2PromptsResponse200Type = Literal["file",]
+ r"""The type of the content part. Always `file`."""
+
+
+ GetAllPrompts2PromptsResponse200ApplicationJSONType = Literal["ephemeral",]
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+
+ GetAllPrompts2TTL = Literal[
+     "5m",
+     "1h",
+ ]
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+ - `5m`: 5 minutes
+ - `1h`: 1 hour
+
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
+ """
+
+
+ class GetAllPrompts2CacheControlTypedDict(TypedDict):
+     type: GetAllPrompts2PromptsResponse200ApplicationJSONType
+     r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+     ttl: NotRequired[GetAllPrompts2TTL]
+     r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+     - `5m`: 5 minutes
+     - `1h`: 1 hour
+
+     Defaults to `5m`. Only supported by `Anthropic` Claude models.
+     """
+
+
+ class GetAllPrompts2CacheControl(BaseModel):
+     type: GetAllPrompts2PromptsResponse200ApplicationJSONType
+     r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
+
+     ttl: Optional[GetAllPrompts2TTL] = "5m"
+     r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
+
+     - `5m`: 5 minutes
+     - `1h`: 1 hour
+
+     Defaults to `5m`. Only supported by `Anthropic` Claude models.
+     """
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["ttl"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val != UNSET_SENTINEL:
+                 if val is not None or k not in optional_fields:
+                     m[k] = val
+
+         return m
+
+
+ class GetAllPrompts24TypedDict(TypedDict):
+     type: GetAllPrompts2PromptsResponse200Type
+     r"""The type of the content part. Always `file`."""
+     file: FileContentPartSchemaTypedDict
+     r"""File data for the content part. Must contain either file_data or uri, but not both."""
+     cache_control: NotRequired[GetAllPrompts2CacheControlTypedDict]
+
+
+ class GetAllPrompts24(BaseModel):
+     type: GetAllPrompts2PromptsResponse200Type
+     r"""The type of the content part. Always `file`."""
+
+     file: FileContentPartSchema
+     r"""File data for the content part. Must contain either file_data or uri, but not both."""
+
+     cache_control: Optional[GetAllPrompts2CacheControl] = None
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["cache_control"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val != UNSET_SENTINEL:
+                 if val is not None or k not in optional_fields:
+                     m[k] = val
+
+         return m
+
+
+ GetAllPromptsContentPrompts2TypedDict = TypeAliasType(
+     "GetAllPromptsContentPrompts2TypedDict",
+     Union[
+         AudioContentPartSchemaTypedDict,
+         TextContentPartSchemaTypedDict,
+         ImageContentPartSchemaTypedDict,
+         GetAllPrompts24TypedDict,
+     ],
+ )
+
+
+ GetAllPromptsContentPrompts2 = Annotated[
+     Union[
+         Annotated[TextContentPartSchema, Tag("text")],
+         Annotated[ImageContentPartSchema, Tag("image_url")],
+         Annotated[AudioContentPartSchema, Tag("input_audio")],
+         Annotated[GetAllPrompts24, Tag("file")],
+     ],
+     Discriminator(lambda m: get_discriminator(m, "type", "type")),
+ ]
+
+
+ GetAllPromptsMessagesPromptsContentTypedDict = TypeAliasType(
+     "GetAllPromptsMessagesPromptsContentTypedDict",
+     Union[str, List[GetAllPromptsContentPrompts2TypedDict]],
+ )
+ r"""The contents of the user message."""
+
+
+ GetAllPromptsMessagesPromptsContent = TypeAliasType(
+     "GetAllPromptsMessagesPromptsContent",
+     Union[str, List[GetAllPromptsContentPrompts2]],
+ )
+ r"""The contents of the user message."""
+
+
+ class GetAllPromptsMessagesUserMessageTypedDict(TypedDict):
+     role: GetAllPromptsMessagesPromptsRole
+     r"""The role of the messages author, in this case `user`."""
+     content: GetAllPromptsMessagesPromptsContentTypedDict
+     r"""The contents of the user message."""
+     name: NotRequired[str]
+     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+
+ class GetAllPromptsMessagesUserMessage(BaseModel):
+     role: GetAllPromptsMessagesPromptsRole
+     r"""The role of the messages author, in this case `user`."""
+
+     content: GetAllPromptsMessagesPromptsContent
+     r"""The contents of the user message."""
+
+     name: Optional[str] = None
+     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["name"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val != UNSET_SENTINEL:
+                 if val is not None or k not in optional_fields:
+                     m[k] = val
+
+         return m
+
+
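A sketch of a multimodal user message routed through the `type` discriminator above. The part shapes assume the OpenAI-style `text`/`image_url` layout that the tags imply; the exact `TextContentPartSchema`/`ImageContentPartSchema` fields are defined elsewhere in this file:

    from orq_ai_sdk.models import GetAllPromptsMessagesUserMessage

    msg = GetAllPromptsMessagesUserMessage.model_validate({
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this image?"},  # assumed part shape
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        ],
    })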
+ GetAllPromptsMessagesRole = Literal["system",]
+ r"""The role of the messages author, in this case `system`."""
+
+
+ GetAllPromptsMessagesContentTypedDict = TypeAliasType(
+     "GetAllPromptsMessagesContentTypedDict",
+     Union[str, List[TextContentPartSchemaTypedDict]],
+ )
+ r"""The contents of the system message."""
+
+
+ GetAllPromptsMessagesContent = TypeAliasType(
+     "GetAllPromptsMessagesContent", Union[str, List[TextContentPartSchema]]
+ )
+ r"""The contents of the system message."""
+
+
+ class GetAllPromptsMessagesSystemMessageTypedDict(TypedDict):
+     r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
+
+     role: GetAllPromptsMessagesRole
+     r"""The role of the messages author, in this case `system`."""
+     content: GetAllPromptsMessagesContentTypedDict
+     r"""The contents of the system message."""
+     name: NotRequired[str]
+     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+
+ class GetAllPromptsMessagesSystemMessage(BaseModel):
+     r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
+
+     role: GetAllPromptsMessagesRole
+     r"""The role of the messages author, in this case `system`."""
+
+     content: GetAllPromptsMessagesContent
+     r"""The contents of the system message."""
+
+     name: Optional[str] = None
+     r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["name"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+
+             if val != UNSET_SENTINEL:
+                 if val is not None or k not in optional_fields:
+                     m[k] = val
+
+         return m
+
+
+ GetAllPromptsPromptsMessagesTypedDict = TypeAliasType(
+     "GetAllPromptsPromptsMessagesTypedDict",
+     Union[
+         GetAllPromptsMessagesSystemMessageTypedDict,
+         GetAllPromptsMessagesUserMessageTypedDict,
+         GetAllPromptsMessagesToolMessageTypedDict,
+         GetAllPromptsMessagesAssistantMessageTypedDict,
+     ],
+ )
+
+
+ GetAllPromptsPromptsMessages = Annotated[
+     Union[
+         Annotated[GetAllPromptsMessagesSystemMessage, Tag("system")],
+         Annotated[GetAllPromptsMessagesUserMessage, Tag("user")],
+         Annotated[GetAllPromptsMessagesAssistantMessage, Tag("assistant")],
+         Annotated[GetAllPromptsMessagesToolMessage, Tag("tool")],
+     ],
+     Discriminator(lambda m: get_discriminator(m, "role", "role")),
+ ]
+
+
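`GetAllPromptsPromptsMessages` is an Annotated union rather than a model, so validating raw data takes a pydantic `TypeAdapter`; the `role` tag selects the variant. A minimal sketch:

    from pydantic import TypeAdapter
    from orq_ai_sdk.models import GetAllPromptsPromptsMessages

    adapter = TypeAdapter(GetAllPromptsPromptsMessages)
    msg = adapter.validate_python({"role": "system", "content": "You are terse."})
    print(type(msg).__name__)  # expected: GetAllPromptsMessagesSystemMessage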
+ class GetAllPromptsPromptFieldTypedDict(TypedDict):
+     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+     audio: NotRequired[Nullable[GetAllPromptsAudioTypedDict]]
+     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+     frequency_penalty: NotRequired[Nullable[float]]
+     r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+     max_tokens: NotRequired[Nullable[int]]
+     r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+     This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+     """
+     max_completion_tokens: NotRequired[Nullable[int]]
+     r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens."""
+     logprobs: NotRequired[Nullable[bool]]
+     r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+     top_logprobs: NotRequired[Nullable[int]]
+     r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+     n: NotRequired[Nullable[int]]
+     r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+     presence_penalty: NotRequired[Nullable[float]]
+     r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+     response_format: NotRequired[GetAllPromptsResponseFormatTypedDict]
+     r"""An object specifying the format that the model must output."""
+     reasoning_effort: NotRequired[GetAllPromptsReasoningEffort]
+     r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+     - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+     - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+     - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+     - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+     Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+     """
+     verbosity: NotRequired[str]
+     r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+     seed: NotRequired[Nullable[float]]
+     r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+     stop: NotRequired[Nullable[GetAllPromptsStopTypedDict]]
+     r"""Up to 4 sequences where the API will stop generating further tokens."""
+     stream_options: NotRequired[Nullable[GetAllPromptsStreamOptionsTypedDict]]
+     r"""Options for streaming response. Only set this when you set stream: true."""
+     thinking: NotRequired[GetAllPromptsThinkingTypedDict]
+     temperature: NotRequired[Nullable[float]]
+     r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+     top_p: NotRequired[Nullable[float]]
+     r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+     top_k: NotRequired[Nullable[float]]
+     r"""Limits the model to consider only the top k most likely tokens at each step."""
+     tool_choice: NotRequired[GetAllPromptsToolChoiceTypedDict]
+     r"""Controls which (if any) tool is called by the model."""
+     parallel_tool_calls: NotRequired[bool]
+     r"""Whether to enable parallel function calling during tool use."""
+     modalities: NotRequired[Nullable[List[GetAllPromptsModalities]]]
+     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+     guardrails: NotRequired[List[GetAllPromptsGuardrailsTypedDict]]
+     r"""A list of guardrails to apply to the request."""
+     fallbacks: NotRequired[List[GetAllPromptsFallbacksTypedDict]]
+     r"""Array of fallback models to use if the primary model fails."""
+     retry: NotRequired[GetAllPromptsRetryTypedDict]
+     r"""Retry configuration for the request."""
+     cache: NotRequired[GetAllPromptsCacheTypedDict]
+     r"""Cache configuration for the request."""
+     load_balancer: NotRequired[GetAllPromptsLoadBalancerTypedDict]
+     r"""Load balancer configuration for the request."""
+     timeout: NotRequired[GetAllPromptsTimeoutTypedDict]
+     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fall back to the next model if configured."""
+     messages: NotRequired[List[GetAllPromptsPromptsMessagesTypedDict]]
+     r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+     model: NotRequired[Nullable[str]]
+     r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use the format `{workspaceKey}@{provider}/{model}`."""
+     version: NotRequired[str]
+
+
+ class GetAllPromptsPromptField(BaseModel):
+     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+     audio: OptionalNullable[GetAllPromptsAudio] = UNSET
+     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
+
+     frequency_penalty: OptionalNullable[float] = UNSET
+     r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
+
+     max_tokens: OptionalNullable[int] = UNSET
+     r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+     This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+     """
+
+     max_completion_tokens: OptionalNullable[int] = UNSET
+     r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens."""
+
+     logprobs: OptionalNullable[bool] = UNSET
+     r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
+
+     top_logprobs: OptionalNullable[int] = UNSET
+     r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
+
+     n: OptionalNullable[int] = UNSET
+     r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
+
+     presence_penalty: OptionalNullable[float] = UNSET
+     r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
+
+     response_format: Optional[GetAllPromptsResponseFormat] = None
+     r"""An object specifying the format that the model must output."""
+
+     reasoning_effort: Optional[GetAllPromptsReasoningEffort] = None
+     r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+     - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+     - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+     - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+     - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+     Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+     """
+
+     verbosity: Optional[str] = None
+     r"""Adjusts response verbosity. Lower levels yield shorter answers."""
+
+     seed: OptionalNullable[float] = UNSET
+     r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
+
+     stop: OptionalNullable[GetAllPromptsStop] = UNSET
+     r"""Up to 4 sequences where the API will stop generating further tokens."""
+
+     stream_options: OptionalNullable[GetAllPromptsStreamOptions] = UNSET
+     r"""Options for streaming response. Only set this when you set stream: true."""
+
+     thinking: Optional[GetAllPromptsThinking] = None
+
+     temperature: OptionalNullable[float] = UNSET
+     r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
+
+     top_p: OptionalNullable[float] = UNSET
+     r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
+
+     top_k: OptionalNullable[float] = UNSET
+     r"""Limits the model to consider only the top k most likely tokens at each step."""
+
+     tool_choice: Optional[GetAllPromptsToolChoice] = None
+     r"""Controls which (if any) tool is called by the model."""
+
+     parallel_tool_calls: Optional[bool] = None
+     r"""Whether to enable parallel function calling during tool use."""
+
+     modalities: OptionalNullable[List[GetAllPromptsModalities]] = UNSET
+     r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
+
+     guardrails: Optional[List[GetAllPromptsGuardrails]] = None
+     r"""A list of guardrails to apply to the request."""
+
+     fallbacks: Optional[List[GetAllPromptsFallbacks]] = None
+     r"""Array of fallback models to use if the primary model fails."""
+
+     retry: Optional[GetAllPromptsRetry] = None
+     r"""Retry configuration for the request."""
+
+     cache: Optional[GetAllPromptsCache] = None
+     r"""Cache configuration for the request."""
+
+     load_balancer: Optional[GetAllPromptsLoadBalancer] = None
+     r"""Load balancer configuration for the request."""
+
+     timeout: Optional[GetAllPromptsTimeout] = None
+     r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fall back to the next model if configured."""
+
+     messages: Optional[List[GetAllPromptsPromptsMessages]] = None
+     r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
+
+     model: OptionalNullable[str] = UNSET
+     r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use the format `{workspaceKey}@{provider}/{model}`."""
+
+     version: Optional[str] = None
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(
+             [
+                 "audio",
+                 "frequency_penalty",
+                 "max_tokens",
+                 "max_completion_tokens",
+                 "logprobs",
+                 "top_logprobs",
+                 "n",
+                 "presence_penalty",
+                 "response_format",
+                 "reasoning_effort",
+                 "verbosity",
+                 "seed",
+                 "stop",
+                 "stream_options",
+                 "thinking",
+                 "temperature",
+                 "top_p",
+                 "top_k",
+                 "tool_choice",
+                 "parallel_tool_calls",
+                 "modalities",
+                 "guardrails",
+                 "fallbacks",
+                 "retry",
+                 "cache",
+                 "load_balancer",
+                 "timeout",
+                 "messages",
+                 "model",
+                 "version",
+             ]
+         )
+         nullable_fields = set(
+             [
+                 "audio",
+                 "frequency_penalty",
+                 "max_tokens",
+                 "max_completion_tokens",
+                 "logprobs",
+                 "top_logprobs",
+                 "n",
+                 "presence_penalty",
+                 "seed",
+                 "stop",
+                 "stream_options",
+                 "temperature",
+                 "top_p",
+                 "top_k",
+                 "modalities",
+                 "model",
+             ]
+         )
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+             is_nullable_and_explicitly_set = (
+                 k in nullable_fields
+                 and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+             )
+
+             if val != UNSET_SENTINEL:
+                 if (
+                     val is not None
+                     or k not in optional_fields
+                     or is_nullable_and_explicitly_set
+                 ):
+                     m[k] = val
+
+         return m
+
+
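A hedged sketch of building the prompt-field configuration above from raw data; every field is optional, and unset knobs are omitted when serialized. Values are illustrative:

    from orq_ai_sdk.models import GetAllPromptsPromptField

    field = GetAllPromptsPromptField.model_validate({
        "model": "openai/gpt-4o",  # private models: "{workspaceKey}@{provider}/{model}"
        "temperature": 0.2,
        "reasoning_effort": "low",
        "messages": [{"role": "user", "content": "Summarize the following."}],
    })
    print(field.model_dump())  # only the four set fields appear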
+ GetAllPromptsUseCases = Literal[
+     "Agents simulations",
+     "Agents",
+     "API interaction",
+     "Autonomous Agents",
+     "Chatbots",
+     "Classification",
+     "Code understanding",
+     "Code writing",
+     "Conversation",
+     "Documents QA",
+     "Evaluation",
+     "Extraction",
+     "Multi-modal",
+     "Self-checking",
+     "Sentiment analysis",
+     "SQL",
+     "Summarization",
+     "Tagging",
+     "Translation (document)",
+     "Translation (sentences)",
+ ]
+
+
+ GetAllPromptsLanguage = Literal[
+     "Chinese",
+     "Dutch",
+     "English",
+     "French",
+     "German",
+     "Russian",
+     "Spanish",
+ ]
+ r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+
+ class GetAllPromptsMetadataTypedDict(TypedDict):
+     use_cases: NotRequired[List[GetAllPromptsUseCases]]
+     r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
+     language: NotRequired[Nullable[GetAllPromptsLanguage]]
+     r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+
+ class GetAllPromptsMetadata(BaseModel):
+     use_cases: Optional[List[GetAllPromptsUseCases]] = None
+     r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
+
+     language: OptionalNullable[GetAllPromptsLanguage] = UNSET
+     r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(["use_cases", "language"])
+         nullable_fields = set(["language"])
+         serialized = handler(self)
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+             is_nullable_and_explicitly_set = (
+                 k in nullable_fields
+                 and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+             )
+
+             if val != UNSET_SENTINEL:
+                 if (
+                     val is not None
+                     or k not in optional_fields
+                     or is_nullable_and_explicitly_set
+                 ):
+                     m[k] = val
+
+         return m
+
+
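The metadata model shows the optional-versus-nullable split most compactly; a minimal sketch of the three serialization outcomes:

    from orq_ai_sdk.models import GetAllPromptsMetadata

    print(GetAllPromptsMetadata(use_cases=["SQL"], language="English").model_dump())
    # expected: {'use_cases': ['SQL'], 'language': 'English'}
    print(GetAllPromptsMetadata(language=None).model_dump())  # explicit null kept: {'language': None}
    print(GetAllPromptsMetadata().model_dump())               # everything unset: {}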
+ class GetAllPromptsPromptTypedDict(TypedDict):
+     r"""A prompt entity with configuration, metadata, and versioning."""
+
+     id: str
+     type: GetAllPromptsType
+     owner: str
+     domain_id: str
+     created: str
+     updated: str
+     display_name: str
+     r"""The prompt’s name, meant to be displayable in the UI."""
+     prompt: GetAllPromptsPromptFieldTypedDict
+     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+     created_by_id: NotRequired[Nullable[str]]
+     updated_by_id: NotRequired[Nullable[str]]
+     description: NotRequired[Nullable[str]]
+     r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long-form explanation of the prompt for your own purpose."""
+     prompt_config: NotRequired[GetAllPromptsPromptConfigTypedDict]
+     r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the OpenAI schema."""
+     metadata: NotRequired[GetAllPromptsMetadataTypedDict]
+
+
+ class GetAllPromptsPrompt(BaseModel):
+     r"""A prompt entity with configuration, metadata, and versioning."""
+
+     id: Annotated[str, pydantic.Field(alias="_id")]
+
+     type: GetAllPromptsType
+
+     owner: str
+
+     domain_id: str
+
+     created: str
+
+     updated: str
+
+     display_name: str
+     r"""The prompt’s name, meant to be displayable in the UI."""
+
+     prompt: GetAllPromptsPromptField
+     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
+
+     created_by_id: OptionalNullable[str] = UNSET
+
+     updated_by_id: OptionalNullable[str] = UNSET
+
+     description: OptionalNullable[str] = UNSET
+     r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long-form explanation of the prompt for your own purpose."""
+
+     prompt_config: Annotated[
+         Optional[GetAllPromptsPromptConfig],
+         pydantic.Field(
+             deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+         ),
+     ] = None
+     r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the OpenAI schema."""
+
+     metadata: Optional[GetAllPromptsMetadata] = None
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = set(
+             [
+                 "created_by_id",
+                 "updated_by_id",
+                 "description",
+                 "prompt_config",
+                 "metadata",
+             ]
+         )
+         nullable_fields = set(["created_by_id", "updated_by_id", "description"])
+         serialized = handler(self)
          m = {}

          for n, f in type(self).model_fields.items():
              k = f.alias or n
              val = serialized.get(k)
-             serialized.pop(k, None)
-
-             optional_nullable = k in optional_fields and k in nullable_fields
-             is_set = (
-                 self.__pydantic_fields_set__.intersection({n})
-                 or k in null_default_fields
-             )  # pylint: disable=no-member
-
-             if val is not None and val != UNSET_SENTINEL:
-                 m[k] = val
-             elif val != UNSET_SENTINEL and (
-                 not k in optional_fields or (optional_nullable and is_set)
-             ):
-                 m[k] = val
+             is_nullable_and_explicitly_set = (
+                 k in nullable_fields
+                 and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+             )
+
+             if val != UNSET_SENTINEL:
+                 if (
+                     val is not None
+                     or k not in optional_fields
+                     or is_nullable_and_explicitly_set
+                 ):
+                     m[k] = val

          return m
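The hunk above also swaps the older pop-and-check serializer (removed lines) for the UNSET-sentinel pattern used by the rest of this file. For orientation, a sketch of reading a prompt entity back, noting the `_id` alias; the `type` value and IDs are assumed, since `GetAllPromptsType` is defined elsewhere in the file:

    from orq_ai_sdk.models import GetAllPromptsPrompt

    prompt = GetAllPromptsPrompt.model_validate({
        "_id": "prompt_123",  # wire key is "_id"; exposed as `prompt.id`
        "type": "prompt",     # assumed GetAllPromptsType member
        "owner": "workspace_123",
        "domain_id": "domain_456",
        "created": "2025-01-01T00:00:00Z",
        "updated": "2025-01-02T00:00:00Z",
        "display_name": "Summarizer",
        "prompt": {"model": "openai/gpt-4o"},
    })
    print(prompt.id)  # expected: prompt_123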