orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167) hide show
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
@@ -1,6 +1,30 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
+ from .audiocontentpartschema import (
5
+ AudioContentPartSchema,
6
+ AudioContentPartSchemaTypedDict,
7
+ )
8
+ from .filecontentpartschema import FileContentPartSchema, FileContentPartSchemaTypedDict
9
+ from .imagecontentpartschema import (
10
+ ImageContentPartSchema,
11
+ ImageContentPartSchemaTypedDict,
12
+ )
13
+ from .reasoningpartschema import ReasoningPartSchema, ReasoningPartSchemaTypedDict
14
+ from .redactedreasoningpartschema import (
15
+ RedactedReasoningPartSchema,
16
+ RedactedReasoningPartSchemaTypedDict,
17
+ )
18
+ from .refusalpartschema import RefusalPartSchema, RefusalPartSchemaTypedDict
19
+ from .textcontentpartschema import TextContentPartSchema, TextContentPartSchemaTypedDict
20
+ from .thinkingconfigdisabledschema import (
21
+ ThinkingConfigDisabledSchema,
22
+ ThinkingConfigDisabledSchemaTypedDict,
23
+ )
24
+ from .thinkingconfigenabledschema import (
25
+ ThinkingConfigEnabledSchema,
26
+ ThinkingConfigEnabledSchemaTypedDict,
27
+ )
4
28
  from orq_ai_sdk.types import (
5
29
  BaseModel,
6
30
  Nullable,
@@ -17,7 +41,13 @@ from orq_ai_sdk.utils import (
17
41
  import pydantic
18
42
  from pydantic import Discriminator, Tag, model_serializer
19
43
  from typing import Any, Dict, List, Literal, Optional, Union
20
- from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
44
+ from typing_extensions import (
45
+ Annotated,
46
+ NotRequired,
47
+ TypeAliasType,
48
+ TypedDict,
49
+ deprecated,
50
+ )
21
51
 
22
52
 
23
53
  class ListPromptVersionsRequestTypedDict(TypedDict):
@@ -53,6 +83,22 @@ class ListPromptVersionsRequest(BaseModel):
53
83
  ] = None
54
84
  r"""A cursor for use in pagination. `ending_before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 20 objects, starting with `01JJ1HDHN79XAS7A01WB3HYSDB`, your subsequent call can include `before=01JJ1HDHN79XAS7A01WB3HYSDB` in order to fetch the previous page of the list."""
55
85
 
86
+ @model_serializer(mode="wrap")
87
+ def serialize_model(self, handler):
88
+ optional_fields = set(["limit", "starting_after", "ending_before"])
89
+ serialized = handler(self)
90
+ m = {}
91
+
92
+ for n, f in type(self).model_fields.items():
93
+ k = f.alias or n
94
+ val = serialized.get(k)
95
+
96
+ if val != UNSET_SENTINEL:
97
+ if val is not None or k not in optional_fields:
98
+ m[k] = val
99
+
100
+ return m
101
+
56
102
 
57
103
  ListPromptVersionsObject = Literal["list",]
58
104
 
@@ -65,6 +111,7 @@ ListPromptVersionsModelType = Literal[
65
111
  "tts",
66
112
  "stt",
67
113
  "rerank",
114
+ "ocr",
68
115
  "moderation",
69
116
  "vision",
70
117
  ]
@@ -105,39 +152,43 @@ ListPromptVersionsResponseFormat4 = Literal[
105
152
  ]
106
153
 
107
154
 
108
- ListPromptVersionsResponseFormatPromptsResponseType = Literal["text",]
155
+ ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType = (
156
+ Literal["text",]
157
+ )
109
158
 
110
159
 
111
160
  class ListPromptVersionsResponseFormat3TypedDict(TypedDict):
112
- type: ListPromptVersionsResponseFormatPromptsResponseType
161
+ type: ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
113
162
 
114
163
 
115
164
  class ListPromptVersionsResponseFormat3(BaseModel):
116
- type: ListPromptVersionsResponseFormatPromptsResponseType
165
+ type: ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
117
166
 
118
167
 
119
- ListPromptVersionsResponseFormatPromptsType = Literal["json_object",]
168
+ ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONType = Literal[
169
+ "json_object",
170
+ ]
120
171
 
121
172
 
122
173
  class ListPromptVersionsResponseFormat2TypedDict(TypedDict):
123
- type: ListPromptVersionsResponseFormatPromptsType
174
+ type: ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONType
124
175
 
125
176
 
126
177
  class ListPromptVersionsResponseFormat2(BaseModel):
127
- type: ListPromptVersionsResponseFormatPromptsType
178
+ type: ListPromptVersionsResponseFormatPromptsResponse200ApplicationJSONType
128
179
 
129
180
 
130
- ListPromptVersionsResponseFormatType = Literal["json_schema",]
181
+ ListPromptVersionsResponseFormatPromptsResponse200Type = Literal["json_schema",]
131
182
 
132
183
 
133
- class ListPromptVersionsResponseFormatJSONSchemaTypedDict(TypedDict):
184
+ class ListPromptVersionsResponseFormatPromptsResponseJSONSchemaTypedDict(TypedDict):
134
185
  name: str
135
186
  schema_: Dict[str, Any]
136
187
  description: NotRequired[str]
137
188
  strict: NotRequired[bool]
138
189
 
139
190
 
140
- class ListPromptVersionsResponseFormatJSONSchema(BaseModel):
191
+ class ListPromptVersionsResponseFormatPromptsResponseJSONSchema(BaseModel):
141
192
  name: str
142
193
 
143
194
  schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
@@ -146,23 +197,55 @@ class ListPromptVersionsResponseFormatJSONSchema(BaseModel):
146
197
 
147
198
  strict: Optional[bool] = None
148
199
 
200
+ @model_serializer(mode="wrap")
201
+ def serialize_model(self, handler):
202
+ optional_fields = set(["description", "strict"])
203
+ serialized = handler(self)
204
+ m = {}
205
+
206
+ for n, f in type(self).model_fields.items():
207
+ k = f.alias or n
208
+ val = serialized.get(k)
209
+
210
+ if val != UNSET_SENTINEL:
211
+ if val is not None or k not in optional_fields:
212
+ m[k] = val
213
+
214
+ return m
215
+
149
216
 
150
217
  class ListPromptVersionsResponseFormat1TypedDict(TypedDict):
151
- type: ListPromptVersionsResponseFormatType
152
- json_schema: ListPromptVersionsResponseFormatJSONSchemaTypedDict
218
+ type: ListPromptVersionsResponseFormatPromptsResponse200Type
219
+ json_schema: ListPromptVersionsResponseFormatPromptsResponseJSONSchemaTypedDict
153
220
  display_name: NotRequired[str]
154
221
 
155
222
 
156
223
  class ListPromptVersionsResponseFormat1(BaseModel):
157
- type: ListPromptVersionsResponseFormatType
224
+ type: ListPromptVersionsResponseFormatPromptsResponse200Type
158
225
 
159
- json_schema: ListPromptVersionsResponseFormatJSONSchema
226
+ json_schema: ListPromptVersionsResponseFormatPromptsResponseJSONSchema
160
227
 
161
228
  display_name: Optional[str] = None
162
229
 
230
+ @model_serializer(mode="wrap")
231
+ def serialize_model(self, handler):
232
+ optional_fields = set(["display_name"])
233
+ serialized = handler(self)
234
+ m = {}
235
+
236
+ for n, f in type(self).model_fields.items():
237
+ k = f.alias or n
238
+ val = serialized.get(k)
239
+
240
+ if val != UNSET_SENTINEL:
241
+ if val is not None or k not in optional_fields:
242
+ m[k] = val
163
243
 
164
- ListPromptVersionsResponseFormatTypedDict = TypeAliasType(
165
- "ListPromptVersionsResponseFormatTypedDict",
244
+ return m
245
+
246
+
247
+ ListPromptVersionsPromptsResponseFormatTypedDict = TypeAliasType(
248
+ "ListPromptVersionsPromptsResponseFormatTypedDict",
166
249
  Union[
167
250
  ListPromptVersionsResponseFormat2TypedDict,
168
251
  ListPromptVersionsResponseFormat3TypedDict,
@@ -182,8 +265,8 @@ Important: when using JSON mode, you must also instruct the model to produce JSO
182
265
  """
183
266
 
184
267
 
185
- ListPromptVersionsResponseFormat = TypeAliasType(
186
- "ListPromptVersionsResponseFormat",
268
+ ListPromptVersionsPromptsResponseFormat = TypeAliasType(
269
+ "ListPromptVersionsPromptsResponseFormat",
187
270
  Union[
188
271
  ListPromptVersionsResponseFormat2,
189
272
  ListPromptVersionsResponseFormat3,
@@ -217,7 +300,7 @@ ListPromptVersionsEncodingFormat = Literal[
217
300
  r"""The format to return the embeddings"""
218
301
 
219
302
 
220
- ListPromptVersionsReasoningEffort = Literal[
303
+ ListPromptVersionsPromptsReasoningEffort = Literal[
221
304
  "none",
222
305
  "disable",
223
306
  "minimal",
@@ -270,7 +353,9 @@ class ListPromptVersionsModelParametersTypedDict(TypedDict):
270
353
  r"""Only supported on `image` models."""
271
354
  style: NotRequired[str]
272
355
  r"""Only supported on `image` models."""
273
- response_format: NotRequired[Nullable[ListPromptVersionsResponseFormatTypedDict]]
356
+ response_format: NotRequired[
357
+ Nullable[ListPromptVersionsPromptsResponseFormatTypedDict]
358
+ ]
274
359
  r"""An object specifying the format that the model must output.
275
360
 
276
361
  Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema
@@ -283,7 +368,7 @@ class ListPromptVersionsModelParametersTypedDict(TypedDict):
283
368
  r"""The version of photoReal to use. Must be v1 or v2. Only available for `leonardoai` provider"""
284
369
  encoding_format: NotRequired[ListPromptVersionsEncodingFormat]
285
370
  r"""The format to return the embeddings"""
286
- reasoning_effort: NotRequired[ListPromptVersionsReasoningEffort]
371
+ reasoning_effort: NotRequired[ListPromptVersionsPromptsReasoningEffort]
287
372
  r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
288
373
  budget_tokens: NotRequired[float]
289
374
  r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""
@@ -339,7 +424,7 @@ class ListPromptVersionsModelParameters(BaseModel):
339
424
  r"""Only supported on `image` models."""
340
425
 
341
426
  response_format: Annotated[
342
- OptionalNullable[ListPromptVersionsResponseFormat],
427
+ OptionalNullable[ListPromptVersionsPromptsResponseFormat],
343
428
  pydantic.Field(alias="responseFormat"),
344
429
  ] = UNSET
345
430
  r"""An object specifying the format that the model must output.
@@ -361,7 +446,7 @@ class ListPromptVersionsModelParameters(BaseModel):
361
446
  r"""The format to return the embeddings"""
362
447
 
363
448
  reasoning_effort: Annotated[
364
- Optional[ListPromptVersionsReasoningEffort],
449
+ Optional[ListPromptVersionsPromptsReasoningEffort],
365
450
  pydantic.Field(alias="reasoningEffort"),
366
451
  ] = None
367
452
  r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
@@ -381,51 +466,48 @@ class ListPromptVersionsModelParameters(BaseModel):
381
466
 
382
467
  @model_serializer(mode="wrap")
383
468
  def serialize_model(self, handler):
384
- optional_fields = [
385
- "temperature",
386
- "maxTokens",
387
- "topK",
388
- "topP",
389
- "frequencyPenalty",
390
- "presencePenalty",
391
- "numImages",
392
- "seed",
393
- "format",
394
- "dimensions",
395
- "quality",
396
- "style",
397
- "responseFormat",
398
- "photoRealVersion",
399
- "encoding_format",
400
- "reasoningEffort",
401
- "budgetTokens",
402
- "verbosity",
403
- "thinkingLevel",
404
- ]
405
- nullable_fields = ["responseFormat"]
406
- null_default_fields = []
407
-
469
+ optional_fields = set(
470
+ [
471
+ "temperature",
472
+ "maxTokens",
473
+ "topK",
474
+ "topP",
475
+ "frequencyPenalty",
476
+ "presencePenalty",
477
+ "numImages",
478
+ "seed",
479
+ "format",
480
+ "dimensions",
481
+ "quality",
482
+ "style",
483
+ "responseFormat",
484
+ "photoRealVersion",
485
+ "encoding_format",
486
+ "reasoningEffort",
487
+ "budgetTokens",
488
+ "verbosity",
489
+ "thinkingLevel",
490
+ ]
491
+ )
492
+ nullable_fields = set(["responseFormat"])
408
493
  serialized = handler(self)
409
-
410
494
  m = {}
411
495
 
412
496
  for n, f in type(self).model_fields.items():
413
497
  k = f.alias or n
414
498
  val = serialized.get(k)
415
- serialized.pop(k, None)
416
-
417
- optional_nullable = k in optional_fields and k in nullable_fields
418
- is_set = (
419
- self.__pydantic_fields_set__.intersection({n})
420
- or k in null_default_fields
421
- ) # pylint: disable=no-member
422
-
423
- if val is not None and val != UNSET_SENTINEL:
424
- m[k] = val
425
- elif val != UNSET_SENTINEL and (
426
- not k in optional_fields or (optional_nullable and is_set)
427
- ):
428
- m[k] = val
499
+ is_nullable_and_explicitly_set = (
500
+ k in nullable_fields
501
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
502
+ )
503
+
504
+ if val != UNSET_SENTINEL:
505
+ if (
506
+ val is not None
507
+ or k not in optional_fields
508
+ or is_nullable_and_explicitly_set
509
+ ):
510
+ m[k] = val
429
511
 
430
512
  return m
431
513
 
@@ -501,6 +583,22 @@ class ListPromptVersions2File(BaseModel):
501
583
  filename: Optional[str] = None
502
584
  r"""The name of the file, used when passing the file to the model as a string."""
503
585
 
586
+ @model_serializer(mode="wrap")
587
+ def serialize_model(self, handler):
588
+ optional_fields = set(["file_data", "uri", "mimeType", "filename"])
589
+ serialized = handler(self)
590
+ m = {}
591
+
592
+ for n, f in type(self).model_fields.items():
593
+ k = f.alias or n
594
+ val = serialized.get(k)
595
+
596
+ if val != UNSET_SENTINEL:
597
+ if val is not None or k not in optional_fields:
598
+ m[k] = val
599
+
600
+ return m
601
+
504
602
 
505
603
  class ListPromptVersions23TypedDict(TypedDict):
506
604
  type: ListPromptVersions2PromptsResponseType
@@ -537,6 +635,22 @@ class ListPromptVersions2ImageURL(BaseModel):
537
635
  detail: Optional[str] = None
538
636
  r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""
539
637
 
638
+ @model_serializer(mode="wrap")
639
+ def serialize_model(self, handler):
640
+ optional_fields = set(["id", "detail"])
641
+ serialized = handler(self)
642
+ m = {}
643
+
644
+ for n, f in type(self).model_fields.items():
645
+ k = f.alias or n
646
+ val = serialized.get(k)
647
+
648
+ if val != UNSET_SENTINEL:
649
+ if val is not None or k not in optional_fields:
650
+ m[k] = val
651
+
652
+ return m
653
+
540
654
 
541
655
  class ListPromptVersions22TypedDict(TypedDict):
542
656
  r"""The image part of the prompt message. Only supported with vision models."""
@@ -604,7 +718,7 @@ ListPromptVersionsContent = TypeAliasType(
604
718
  r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
605
719
 
606
720
 
607
- ListPromptVersionsType = Literal["function",]
721
+ ListPromptVersionsPromptsType = Literal["function",]
608
722
 
609
723
 
610
724
  class ListPromptVersionsFunctionTypedDict(TypedDict):
@@ -621,14 +735,14 @@ class ListPromptVersionsFunction(BaseModel):
621
735
 
622
736
 
623
737
  class ListPromptVersionsToolCallsTypedDict(TypedDict):
624
- type: ListPromptVersionsType
738
+ type: ListPromptVersionsPromptsType
625
739
  function: ListPromptVersionsFunctionTypedDict
626
740
  id: NotRequired[str]
627
741
  index: NotRequired[float]
628
742
 
629
743
 
630
744
  class ListPromptVersionsToolCalls(BaseModel):
631
- type: ListPromptVersionsType
745
+ type: ListPromptVersionsPromptsType
632
746
 
633
747
  function: ListPromptVersionsFunction
634
748
 
@@ -636,6 +750,22 @@ class ListPromptVersionsToolCalls(BaseModel):
636
750
 
637
751
  index: Optional[float] = None
638
752
 
753
+ @model_serializer(mode="wrap")
754
+ def serialize_model(self, handler):
755
+ optional_fields = set(["id", "index"])
756
+ serialized = handler(self)
757
+ m = {}
758
+
759
+ for n, f in type(self).model_fields.items():
760
+ k = f.alias or n
761
+ val = serialized.get(k)
762
+
763
+ if val != UNSET_SENTINEL:
764
+ if val is not None or k not in optional_fields:
765
+ m[k] = val
766
+
767
+ return m
768
+
639
769
 
640
770
  class ListPromptVersionsMessagesTypedDict(TypedDict):
641
771
  role: ListPromptVersionsRole
@@ -659,61 +789,62 @@ class ListPromptVersionsMessages(BaseModel):
659
789
 
660
790
  @model_serializer(mode="wrap")
661
791
  def serialize_model(self, handler):
662
- optional_fields = ["tool_calls", "tool_call_id"]
663
- nullable_fields = ["content", "tool_call_id"]
664
- null_default_fields = []
665
-
792
+ optional_fields = set(["tool_calls", "tool_call_id"])
793
+ nullable_fields = set(["content", "tool_call_id"])
666
794
  serialized = handler(self)
667
-
668
795
  m = {}
669
796
 
670
797
  for n, f in type(self).model_fields.items():
671
798
  k = f.alias or n
672
799
  val = serialized.get(k)
673
- serialized.pop(k, None)
674
-
675
- optional_nullable = k in optional_fields and k in nullable_fields
676
- is_set = (
677
- self.__pydantic_fields_set__.intersection({n})
678
- or k in null_default_fields
679
- ) # pylint: disable=no-member
680
-
681
- if val is not None and val != UNSET_SENTINEL:
682
- m[k] = val
683
- elif val != UNSET_SENTINEL and (
684
- not k in optional_fields or (optional_nullable and is_set)
685
- ):
686
- m[k] = val
800
+ is_nullable_and_explicitly_set = (
801
+ k in nullable_fields
802
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
803
+ )
804
+
805
+ if val != UNSET_SENTINEL:
806
+ if (
807
+ val is not None
808
+ or k not in optional_fields
809
+ or is_nullable_and_explicitly_set
810
+ ):
811
+ m[k] = val
687
812
 
688
813
  return m
689
814
 
690
815
 
816
+ @deprecated(
817
+ "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
818
+ )
691
819
  class ListPromptVersionsPromptConfigTypedDict(TypedDict):
692
- r"""A list of messages compatible with the openAI schema"""
820
+ r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
693
821
 
694
822
  messages: List[ListPromptVersionsMessagesTypedDict]
695
823
  stream: NotRequired[bool]
696
- model: NotRequired[str]
824
+ model: NotRequired[Nullable[str]]
697
825
  model_db_id: NotRequired[Nullable[str]]
698
826
  r"""The id of the resource"""
699
827
  model_type: NotRequired[Nullable[ListPromptVersionsModelType]]
700
828
  r"""The modality of the model"""
701
829
  model_parameters: NotRequired[ListPromptVersionsModelParametersTypedDict]
702
830
  r"""Model Parameters: Not all parameters apply to every model"""
703
- provider: NotRequired[ListPromptVersionsProvider]
831
+ provider: NotRequired[Nullable[ListPromptVersionsProvider]]
704
832
  integration_id: NotRequired[Nullable[str]]
705
833
  r"""The ID of the integration to use"""
706
834
  version: NotRequired[str]
707
835
 
708
836
 
837
+ @deprecated(
838
+ "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
839
+ )
709
840
  class ListPromptVersionsPromptConfig(BaseModel):
710
- r"""A list of messages compatible with the openAI schema"""
841
+ r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
711
842
 
712
843
  messages: List[ListPromptVersionsMessages]
713
844
 
714
845
  stream: Optional[bool] = None
715
846
 
716
- model: Optional[str] = None
847
+ model: OptionalNullable[str] = UNSET
717
848
 
718
849
  model_db_id: OptionalNullable[str] = UNSET
719
850
  r"""The id of the resource"""
@@ -724,7 +855,7 @@ class ListPromptVersionsPromptConfig(BaseModel):
724
855
  model_parameters: Optional[ListPromptVersionsModelParameters] = None
725
856
  r"""Model Parameters: Not all parameters apply to every model"""
726
857
 
727
- provider: Optional[ListPromptVersionsProvider] = None
858
+ provider: OptionalNullable[ListPromptVersionsProvider] = UNSET
728
859
 
729
860
  integration_id: OptionalNullable[str] = UNSET
730
861
  r"""The ID of the integration to use"""
@@ -733,181 +864,1518 @@ class ListPromptVersionsPromptConfig(BaseModel):
733
864
 
734
865
  @model_serializer(mode="wrap")
735
866
  def serialize_model(self, handler):
736
- optional_fields = [
737
- "stream",
738
- "model",
739
- "model_db_id",
740
- "model_type",
741
- "model_parameters",
742
- "provider",
743
- "integration_id",
744
- "version",
745
- ]
746
- nullable_fields = ["model_db_id", "model_type", "integration_id"]
747
- null_default_fields = []
748
-
867
+ optional_fields = set(
868
+ [
869
+ "stream",
870
+ "model",
871
+ "model_db_id",
872
+ "model_type",
873
+ "model_parameters",
874
+ "provider",
875
+ "integration_id",
876
+ "version",
877
+ ]
878
+ )
879
+ nullable_fields = set(
880
+ ["model", "model_db_id", "model_type", "provider", "integration_id"]
881
+ )
749
882
  serialized = handler(self)
750
-
751
883
  m = {}
752
884
 
753
885
  for n, f in type(self).model_fields.items():
754
886
  k = f.alias or n
755
887
  val = serialized.get(k)
756
- serialized.pop(k, None)
757
-
758
- optional_nullable = k in optional_fields and k in nullable_fields
759
- is_set = (
760
- self.__pydantic_fields_set__.intersection({n})
761
- or k in null_default_fields
762
- ) # pylint: disable=no-member
763
-
764
- if val is not None and val != UNSET_SENTINEL:
765
- m[k] = val
766
- elif val != UNSET_SENTINEL and (
767
- not k in optional_fields or (optional_nullable and is_set)
768
- ):
769
- m[k] = val
888
+ is_nullable_and_explicitly_set = (
889
+ k in nullable_fields
890
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
891
+ )
892
+
893
+ if val != UNSET_SENTINEL:
894
+ if (
895
+ val is not None
896
+ or k not in optional_fields
897
+ or is_nullable_and_explicitly_set
898
+ ):
899
+ m[k] = val
770
900
 
771
901
  return m
772
902
 
773
903
 
774
- ListPromptVersionsUseCases = Literal[
775
- "Agents simulations",
776
- "Agents",
777
- "API interaction",
778
- "Autonomous Agents",
779
- "Chatbots",
780
- "Classification",
781
- "Code understanding",
782
- "Code writing",
783
- "Conversation",
784
- "Documents QA",
785
- "Evaluation",
786
- "Extraction",
787
- "Multi-modal",
788
- "Self-checking",
789
- "Sentiment analysis",
790
- "SQL",
791
- "Summarization",
792
- "Tagging",
793
- "Translation (document)",
794
- "Translation (sentences)",
904
+ ListPromptVersionsVoice = Literal[
905
+ "alloy",
906
+ "echo",
907
+ "fable",
908
+ "onyx",
909
+ "nova",
910
+ "shimmer",
795
911
  ]
912
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
796
913
 
797
914
 
798
- ListPromptVersionsLanguage = Literal[
799
- "Chinese",
800
- "Dutch",
801
- "English",
802
- "French",
803
- "German",
804
- "Russian",
805
- "Spanish",
915
+ ListPromptVersionsPromptsFormat = Literal[
916
+ "wav",
917
+ "mp3",
918
+ "flac",
919
+ "opus",
920
+ "pcm16",
806
921
  ]
807
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
922
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
808
923
 
809
924
 
810
- class ListPromptVersionsMetadataTypedDict(TypedDict):
811
- use_cases: NotRequired[List[ListPromptVersionsUseCases]]
812
- r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
813
- language: NotRequired[Nullable[ListPromptVersionsLanguage]]
814
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
925
+ class ListPromptVersionsAudioTypedDict(TypedDict):
926
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
815
927
 
928
+ voice: ListPromptVersionsVoice
929
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
930
+ format_: ListPromptVersionsPromptsFormat
931
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
816
932
 
817
- class ListPromptVersionsMetadata(BaseModel):
818
- use_cases: Optional[List[ListPromptVersionsUseCases]] = None
819
- r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
820
933
 
821
- language: OptionalNullable[ListPromptVersionsLanguage] = UNSET
822
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
934
+ class ListPromptVersionsAudio(BaseModel):
935
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
936
+
937
+ voice: ListPromptVersionsVoice
938
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
939
+
940
+ format_: Annotated[ListPromptVersionsPromptsFormat, pydantic.Field(alias="format")]
941
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
942
+
943
+
944
+ ListPromptVersionsResponseFormatPromptsResponseType = Literal["json_schema",]
945
+
946
+
947
+ class ListPromptVersionsResponseFormatJSONSchemaTypedDict(TypedDict):
948
+ name: str
949
+ r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
950
+ description: NotRequired[str]
951
+ r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
952
+ schema_: NotRequired[Any]
953
+ r"""The schema for the response format, described as a JSON Schema object."""
954
+ strict: NotRequired[bool]
955
+ r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
956
+
957
+
958
+ class ListPromptVersionsResponseFormatJSONSchema(BaseModel):
959
+ name: str
960
+ r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
961
+
962
+ description: Optional[str] = None
963
+ r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
964
+
965
+ schema_: Annotated[Optional[Any], pydantic.Field(alias="schema")] = None
966
+ r"""The schema for the response format, described as a JSON Schema object."""
967
+
968
+ strict: Optional[bool] = False
969
+ r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
823
970
 
824
971
  @model_serializer(mode="wrap")
825
972
  def serialize_model(self, handler):
826
- optional_fields = ["use_cases", "language"]
827
- nullable_fields = ["language"]
828
- null_default_fields = []
829
-
973
+ optional_fields = set(["description", "schema", "strict"])
830
974
  serialized = handler(self)
831
-
832
975
  m = {}
833
976
 
834
977
  for n, f in type(self).model_fields.items():
835
978
  k = f.alias or n
836
979
  val = serialized.get(k)
837
- serialized.pop(k, None)
838
-
839
- optional_nullable = k in optional_fields and k in nullable_fields
840
- is_set = (
841
- self.__pydantic_fields_set__.intersection({n})
842
- or k in null_default_fields
843
- ) # pylint: disable=no-member
844
980
 
845
- if val is not None and val != UNSET_SENTINEL:
846
- m[k] = val
847
- elif val != UNSET_SENTINEL and (
848
- not k in optional_fields or (optional_nullable and is_set)
849
- ):
850
- m[k] = val
981
+ if val != UNSET_SENTINEL:
982
+ if val is not None or k not in optional_fields:
983
+ m[k] = val
851
984
 
852
985
  return m
853
986
 
854
987
 
855
- class ListPromptVersionsDataTypedDict(TypedDict):
856
- id: str
857
- prompt_config: ListPromptVersionsPromptConfigTypedDict
858
- r"""A list of messages compatible with the openAI schema"""
859
- timestamp: str
860
- created_by_id: NotRequired[Nullable[str]]
861
- updated_by_id: NotRequired[Nullable[str]]
862
- description: NotRequired[Nullable[str]]
863
- r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
864
- metadata: NotRequired[ListPromptVersionsMetadataTypedDict]
988
+ class ListPromptVersionsResponseFormatPromptsJSONSchemaTypedDict(TypedDict):
989
+ r"""
865
990
 
991
+ JSON Schema response format. Used to generate structured JSON responses
992
+ """
866
993
 
867
- class ListPromptVersionsData(BaseModel):
868
- id: Annotated[str, pydantic.Field(alias="_id")]
994
+ type: ListPromptVersionsResponseFormatPromptsResponseType
995
+ json_schema: ListPromptVersionsResponseFormatJSONSchemaTypedDict
869
996
 
870
- prompt_config: ListPromptVersionsPromptConfig
871
- r"""A list of messages compatible with the openAI schema"""
872
997
 
873
- timestamp: str
998
+ class ListPromptVersionsResponseFormatPromptsJSONSchema(BaseModel):
999
+ r"""
874
1000
 
875
- created_by_id: OptionalNullable[str] = UNSET
1001
+ JSON Schema response format. Used to generate structured JSON responses
1002
+ """
876
1003
 
877
- updated_by_id: OptionalNullable[str] = UNSET
1004
+ type: ListPromptVersionsResponseFormatPromptsResponseType
878
1005
 
879
- description: OptionalNullable[str] = UNSET
880
- r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
1006
+ json_schema: ListPromptVersionsResponseFormatJSONSchema
881
1007
 
882
- metadata: Optional[ListPromptVersionsMetadata] = None
883
1008
 
884
- @model_serializer(mode="wrap")
885
- def serialize_model(self, handler):
886
- optional_fields = ["created_by_id", "updated_by_id", "description", "metadata"]
887
- nullable_fields = ["created_by_id", "updated_by_id", "description"]
888
- null_default_fields = []
1009
+ ListPromptVersionsResponseFormatPromptsType = Literal["json_object",]
1010
+
1011
+
1012
+ class ListPromptVersionsResponseFormatJSONObjectTypedDict(TypedDict):
1013
+ r"""
1014
+
1015
+ JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
1016
+ """
1017
+
1018
+ type: ListPromptVersionsResponseFormatPromptsType
1019
+
1020
+
1021
+ class ListPromptVersionsResponseFormatJSONObject(BaseModel):
1022
+ r"""
1023
+
1024
+ JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
1025
+ """
1026
+
1027
+ type: ListPromptVersionsResponseFormatPromptsType
1028
+
1029
+
1030
+ ListPromptVersionsResponseFormatType = Literal["text",]
1031
+
1032
+
1033
+ class ListPromptVersionsResponseFormatTextTypedDict(TypedDict):
1034
+ r"""
1035
+
1036
+ Default response format. Used to generate text responses
1037
+ """
1038
+
1039
+ type: ListPromptVersionsResponseFormatType
889
1040
 
890
- serialized = handler(self)
891
1041
 
1042
+ class ListPromptVersionsResponseFormatText(BaseModel):
1043
+ r"""
1044
+
1045
+ Default response format. Used to generate text responses
1046
+ """
1047
+
1048
+ type: ListPromptVersionsResponseFormatType
1049
+
1050
+
1051
+ ListPromptVersionsResponseFormatTypedDict = TypeAliasType(
1052
+ "ListPromptVersionsResponseFormatTypedDict",
1053
+ Union[
1054
+ ListPromptVersionsResponseFormatTextTypedDict,
1055
+ ListPromptVersionsResponseFormatJSONObjectTypedDict,
1056
+ ListPromptVersionsResponseFormatPromptsJSONSchemaTypedDict,
1057
+ ],
1058
+ )
1059
+ r"""An object specifying the format that the model must output"""
1060
+
1061
+
1062
+ ListPromptVersionsResponseFormat = Annotated[
1063
+ Union[
1064
+ Annotated[ListPromptVersionsResponseFormatText, Tag("text")],
1065
+ Annotated[ListPromptVersionsResponseFormatJSONObject, Tag("json_object")],
1066
+ Annotated[
1067
+ ListPromptVersionsResponseFormatPromptsJSONSchema, Tag("json_schema")
1068
+ ],
1069
+ ],
1070
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1071
+ ]
1072
+ r"""An object specifying the format that the model must output"""
1073
+
1074
+
1075
+ ListPromptVersionsReasoningEffort = Literal[
1076
+ "none",
1077
+ "minimal",
1078
+ "low",
1079
+ "medium",
1080
+ "high",
1081
+ "xhigh",
1082
+ ]
1083
+ r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
1084
+
1085
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
1086
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
1087
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1088
+ - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
1089
+
1090
+ Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
1091
+ """
1092
+
1093
+
1094
+ ListPromptVersionsStopTypedDict = TypeAliasType(
1095
+ "ListPromptVersionsStopTypedDict", Union[str, List[str]]
1096
+ )
1097
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
1098
+
1099
+
1100
+ ListPromptVersionsStop = TypeAliasType("ListPromptVersionsStop", Union[str, List[str]])
1101
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
1102
+
1103
+
1104
+ class ListPromptVersionsStreamOptionsTypedDict(TypedDict):
1105
+ r"""Options for streaming response. Only set this when you set stream: true."""
1106
+
1107
+ include_usage: NotRequired[bool]
1108
+ r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
1109
+
1110
+
1111
+ class ListPromptVersionsStreamOptions(BaseModel):
1112
+ r"""Options for streaming response. Only set this when you set stream: true."""
1113
+
1114
+ include_usage: Optional[bool] = None
1115
+ r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
1116
+
1117
+ @model_serializer(mode="wrap")
1118
+ def serialize_model(self, handler):
1119
+ optional_fields = set(["include_usage"])
1120
+ serialized = handler(self)
1121
+ m = {}
1122
+
1123
+ for n, f in type(self).model_fields.items():
1124
+ k = f.alias or n
1125
+ val = serialized.get(k)
1126
+
1127
+ if val != UNSET_SENTINEL:
1128
+ if val is not None or k not in optional_fields:
1129
+ m[k] = val
1130
+
1131
+ return m
1132
+
1133
+
1134
+ ListPromptVersionsThinkingTypedDict = TypeAliasType(
1135
+ "ListPromptVersionsThinkingTypedDict",
1136
+ Union[ThinkingConfigDisabledSchemaTypedDict, ThinkingConfigEnabledSchemaTypedDict],
1137
+ )
1138
+
1139
+
1140
+ ListPromptVersionsThinking = Annotated[
1141
+ Union[
1142
+ Annotated[ThinkingConfigDisabledSchema, Tag("disabled")],
1143
+ Annotated[ThinkingConfigEnabledSchema, Tag("enabled")],
1144
+ ],
1145
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1146
+ ]
1147
+
1148
+
1149
+ ListPromptVersionsToolChoiceType = Literal["function",]
1150
+ r"""The type of the tool. Currently, only function is supported."""
1151
+
1152
+
1153
+ class ListPromptVersionsToolChoiceFunctionTypedDict(TypedDict):
1154
+ name: str
1155
+ r"""The name of the function to call."""
1156
+
1157
+
1158
+ class ListPromptVersionsToolChoiceFunction(BaseModel):
1159
+ name: str
1160
+ r"""The name of the function to call."""
1161
+
1162
+
1163
+ class ListPromptVersionsToolChoice2TypedDict(TypedDict):
1164
+ function: ListPromptVersionsToolChoiceFunctionTypedDict
1165
+ type: NotRequired[ListPromptVersionsToolChoiceType]
1166
+ r"""The type of the tool. Currently, only function is supported."""
1167
+
1168
+
1169
+ class ListPromptVersionsToolChoice2(BaseModel):
1170
+ function: ListPromptVersionsToolChoiceFunction
1171
+
1172
+ type: Optional[ListPromptVersionsToolChoiceType] = None
1173
+ r"""The type of the tool. Currently, only function is supported."""
1174
+
1175
+ @model_serializer(mode="wrap")
1176
+ def serialize_model(self, handler):
1177
+ optional_fields = set(["type"])
1178
+ serialized = handler(self)
1179
+ m = {}
1180
+
1181
+ for n, f in type(self).model_fields.items():
1182
+ k = f.alias or n
1183
+ val = serialized.get(k)
1184
+
1185
+ if val != UNSET_SENTINEL:
1186
+ if val is not None or k not in optional_fields:
1187
+ m[k] = val
1188
+
1189
+ return m
1190
+
1191
+
1192
+ ListPromptVersionsToolChoice1 = Literal[
1193
+ "none",
1194
+ "auto",
1195
+ "required",
1196
+ ]
1197
+
1198
+
1199
+ ListPromptVersionsToolChoiceTypedDict = TypeAliasType(
1200
+ "ListPromptVersionsToolChoiceTypedDict",
1201
+ Union[ListPromptVersionsToolChoice2TypedDict, ListPromptVersionsToolChoice1],
1202
+ )
1203
+ r"""Controls which (if any) tool is called by the model."""
1204
+
1205
+
1206
+ ListPromptVersionsToolChoice = TypeAliasType(
1207
+ "ListPromptVersionsToolChoice",
1208
+ Union[ListPromptVersionsToolChoice2, ListPromptVersionsToolChoice1],
1209
+ )
1210
+ r"""Controls which (if any) tool is called by the model."""
1211
+
1212
+
1213
+ ListPromptVersionsModalities = Literal[
1214
+ "text",
1215
+ "audio",
1216
+ ]
1217
+
1218
+
1219
+ ListPromptVersionsID1 = Literal[
1220
+ "orq_pii_detection",
1221
+ "orq_sexual_moderation",
1222
+ "orq_harmful_moderation",
1223
+ ]
1224
+ r"""The key of the guardrail."""
1225
+
1226
+
1227
+ ListPromptVersionsIDTypedDict = TypeAliasType(
1228
+ "ListPromptVersionsIDTypedDict", Union[ListPromptVersionsID1, str]
1229
+ )
1230
+
1231
+
1232
+ ListPromptVersionsID = TypeAliasType(
1233
+ "ListPromptVersionsID", Union[ListPromptVersionsID1, str]
1234
+ )
1235
+
1236
+
1237
+ ListPromptVersionsExecuteOn = Literal[
1238
+ "input",
1239
+ "output",
1240
+ ]
1241
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1242
+
1243
+
1244
+ class ListPromptVersionsGuardrailsTypedDict(TypedDict):
1245
+ id: ListPromptVersionsIDTypedDict
1246
+ execute_on: ListPromptVersionsExecuteOn
1247
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1248
+
1249
+
1250
+ class ListPromptVersionsGuardrails(BaseModel):
1251
+ id: ListPromptVersionsID
1252
+
1253
+ execute_on: ListPromptVersionsExecuteOn
1254
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1255
+
1256
+
1257
+ class ListPromptVersionsFallbacksTypedDict(TypedDict):
1258
+ model: str
1259
+ r"""Fallback model identifier"""
1260
+
1261
+
1262
+ class ListPromptVersionsFallbacks(BaseModel):
1263
+ model: str
1264
+ r"""Fallback model identifier"""
1265
+
1266
+
1267
+ class ListPromptVersionsRetryTypedDict(TypedDict):
1268
+ r"""Retry configuration for the request"""
1269
+
1270
+ count: NotRequired[float]
1271
+ r"""Number of retry attempts (1-5)"""
1272
+ on_codes: NotRequired[List[float]]
1273
+ r"""HTTP status codes that trigger retry logic"""
1274
+
1275
+
1276
+ class ListPromptVersionsRetry(BaseModel):
1277
+ r"""Retry configuration for the request"""
1278
+
1279
+ count: Optional[float] = 3
1280
+ r"""Number of retry attempts (1-5)"""
1281
+
1282
+ on_codes: Optional[List[float]] = None
1283
+ r"""HTTP status codes that trigger retry logic"""
1284
+
1285
+ @model_serializer(mode="wrap")
1286
+ def serialize_model(self, handler):
1287
+ optional_fields = set(["count", "on_codes"])
1288
+ serialized = handler(self)
1289
+ m = {}
1290
+
1291
+ for n, f in type(self).model_fields.items():
1292
+ k = f.alias or n
1293
+ val = serialized.get(k)
1294
+
1295
+ if val != UNSET_SENTINEL:
1296
+ if val is not None or k not in optional_fields:
1297
+ m[k] = val
1298
+
1299
+ return m
1300
+
1301
+
1302
+ ListPromptVersionsType = Literal["exact_match",]
1303
+
1304
+
1305
+ class ListPromptVersionsCacheTypedDict(TypedDict):
1306
+ r"""Cache configuration for the request."""
1307
+
1308
+ type: ListPromptVersionsType
1309
+ ttl: NotRequired[float]
1310
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
1311
+
1312
+
1313
+ class ListPromptVersionsCache(BaseModel):
1314
+ r"""Cache configuration for the request."""
1315
+
1316
+ type: ListPromptVersionsType
1317
+
1318
+ ttl: Optional[float] = 1800
1319
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
1320
+
1321
+ @model_serializer(mode="wrap")
1322
+ def serialize_model(self, handler):
1323
+ optional_fields = set(["ttl"])
1324
+ serialized = handler(self)
1325
+ m = {}
1326
+
1327
+ for n, f in type(self).model_fields.items():
1328
+ k = f.alias or n
1329
+ val = serialized.get(k)
1330
+
1331
+ if val != UNSET_SENTINEL:
1332
+ if val is not None or k not in optional_fields:
1333
+ m[k] = val
1334
+
1335
+ return m
1336
+
1337
+
1338
+ ListPromptVersionsLoadBalancerType = Literal["weight_based",]
1339
+
1340
+
1341
+ class ListPromptVersionsLoadBalancerModelsTypedDict(TypedDict):
1342
+ model: str
1343
+ r"""Model identifier for load balancing"""
1344
+ weight: NotRequired[float]
1345
+ r"""Weight assigned to this model for load balancing"""
1346
+
1347
+
1348
+ class ListPromptVersionsLoadBalancerModels(BaseModel):
1349
+ model: str
1350
+ r"""Model identifier for load balancing"""
1351
+
1352
+ weight: Optional[float] = 0.5
1353
+ r"""Weight assigned to this model for load balancing"""
1354
+
1355
+ @model_serializer(mode="wrap")
1356
+ def serialize_model(self, handler):
1357
+ optional_fields = set(["weight"])
1358
+ serialized = handler(self)
1359
+ m = {}
1360
+
1361
+ for n, f in type(self).model_fields.items():
1362
+ k = f.alias or n
1363
+ val = serialized.get(k)
1364
+
1365
+ if val != UNSET_SENTINEL:
1366
+ if val is not None or k not in optional_fields:
1367
+ m[k] = val
1368
+
1369
+ return m
1370
+
1371
+
1372
+ class ListPromptVersionsLoadBalancer1TypedDict(TypedDict):
1373
+ type: ListPromptVersionsLoadBalancerType
1374
+ models: List[ListPromptVersionsLoadBalancerModelsTypedDict]
1375
+
1376
+
1377
+ class ListPromptVersionsLoadBalancer1(BaseModel):
1378
+ type: ListPromptVersionsLoadBalancerType
1379
+
1380
+ models: List[ListPromptVersionsLoadBalancerModels]
1381
+
1382
+
1383
+ ListPromptVersionsLoadBalancerTypedDict = ListPromptVersionsLoadBalancer1TypedDict
1384
+ r"""Load balancer configuration for the request."""
1385
+
1386
+
1387
+ ListPromptVersionsLoadBalancer = ListPromptVersionsLoadBalancer1
1388
+ r"""Load balancer configuration for the request."""
1389
+
1390
+
1391
+ class ListPromptVersionsTimeoutTypedDict(TypedDict):
1392
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1393
+
1394
+ call_timeout: float
1395
+ r"""Timeout value in milliseconds"""
1396
+
1397
+
1398
+ class ListPromptVersionsTimeout(BaseModel):
1399
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1400
+
1401
+ call_timeout: float
1402
+ r"""Timeout value in milliseconds"""
1403
+
1404
+
1405
+ ListPromptVersionsMessagesPromptsResponse200Role = Literal["tool",]
1406
+ r"""The role of the messages author, in this case tool."""
1407
+
1408
+
1409
+ ListPromptVersionsContentPromptsResponse2002TypedDict = TextContentPartSchemaTypedDict
1410
+
1411
+
1412
+ ListPromptVersionsContentPromptsResponse2002 = TextContentPartSchema
1413
+
1414
+
1415
+ ListPromptVersionsMessagesPromptsResponse200ContentTypedDict = TypeAliasType(
1416
+ "ListPromptVersionsMessagesPromptsResponse200ContentTypedDict",
1417
+ Union[str, List[ListPromptVersionsContentPromptsResponse2002TypedDict]],
1418
+ )
1419
+ r"""The contents of the tool message."""
1420
+
1421
+
1422
+ ListPromptVersionsMessagesPromptsResponse200Content = TypeAliasType(
1423
+ "ListPromptVersionsMessagesPromptsResponse200Content",
1424
+ Union[str, List[ListPromptVersionsContentPromptsResponse2002]],
1425
+ )
1426
+ r"""The contents of the tool message."""
1427
+
1428
+
1429
+ ListPromptVersionsMessagesPromptsType = Literal["ephemeral",]
1430
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1431
+
1432
+
1433
+ ListPromptVersionsMessagesTTL = Literal[
1434
+ "5m",
1435
+ "1h",
1436
+ ]
1437
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1438
+
1439
+ - `5m`: 5 minutes
1440
+ - `1h`: 1 hour
1441
+
1442
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1443
+ """
1444
+
1445
+
1446
+ class ListPromptVersionsMessagesCacheControlTypedDict(TypedDict):
1447
+ type: ListPromptVersionsMessagesPromptsType
1448
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1449
+ ttl: NotRequired[ListPromptVersionsMessagesTTL]
1450
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1451
+
1452
+ - `5m`: 5 minutes
1453
+ - `1h`: 1 hour
1454
+
1455
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1456
+ """
1457
+
1458
+
1459
+ class ListPromptVersionsMessagesCacheControl(BaseModel):
1460
+ type: ListPromptVersionsMessagesPromptsType
1461
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1462
+
1463
+ ttl: Optional[ListPromptVersionsMessagesTTL] = "5m"
1464
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1465
+
1466
+ - `5m`: 5 minutes
1467
+ - `1h`: 1 hour
1468
+
1469
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1470
+ """
1471
+
1472
+ @model_serializer(mode="wrap")
1473
+ def serialize_model(self, handler):
1474
+ optional_fields = set(["ttl"])
1475
+ serialized = handler(self)
1476
+ m = {}
1477
+
1478
+ for n, f in type(self).model_fields.items():
1479
+ k = f.alias or n
1480
+ val = serialized.get(k)
1481
+
1482
+ if val != UNSET_SENTINEL:
1483
+ if val is not None or k not in optional_fields:
1484
+ m[k] = val
1485
+
1486
+ return m
1487
+
1488
+
1489
+ class ListPromptVersionsMessagesToolMessageTypedDict(TypedDict):
1490
+ role: ListPromptVersionsMessagesPromptsResponse200Role
1491
+ r"""The role of the messages author, in this case tool."""
1492
+ content: ListPromptVersionsMessagesPromptsResponse200ContentTypedDict
1493
+ r"""The contents of the tool message."""
1494
+ tool_call_id: Nullable[str]
1495
+ r"""Tool call that this message is responding to."""
1496
+ cache_control: NotRequired[ListPromptVersionsMessagesCacheControlTypedDict]
1497
+
1498
+
1499
+ class ListPromptVersionsMessagesToolMessage(BaseModel):
1500
+ role: ListPromptVersionsMessagesPromptsResponse200Role
1501
+ r"""The role of the messages author, in this case tool."""
1502
+
1503
+ content: ListPromptVersionsMessagesPromptsResponse200Content
1504
+ r"""The contents of the tool message."""
1505
+
1506
+ tool_call_id: Nullable[str]
1507
+ r"""Tool call that this message is responding to."""
1508
+
1509
+ cache_control: Optional[ListPromptVersionsMessagesCacheControl] = None
1510
+
1511
+ @model_serializer(mode="wrap")
1512
+ def serialize_model(self, handler):
1513
+ optional_fields = set(["cache_control"])
1514
+ nullable_fields = set(["tool_call_id"])
1515
+ serialized = handler(self)
1516
+ m = {}
1517
+
1518
+ for n, f in type(self).model_fields.items():
1519
+ k = f.alias or n
1520
+ val = serialized.get(k)
1521
+ is_nullable_and_explicitly_set = (
1522
+ k in nullable_fields
1523
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
1524
+ )
1525
+
1526
+ if val != UNSET_SENTINEL:
1527
+ if (
1528
+ val is not None
1529
+ or k not in optional_fields
1530
+ or is_nullable_and_explicitly_set
1531
+ ):
1532
+ m[k] = val
1533
+
1534
+ return m
1535
+
1536
+
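A sketch of constructing the tool message defined above, again assuming the classes are importable from orq_ai_sdk.models; the tool_call_id value is a placeholder. The content union accepts either a plain string or a list of text parts.

from orq_ai_sdk.models import (
    ListPromptVersionsMessagesCacheControl,
    ListPromptVersionsMessagesToolMessage,
)

tool_msg = ListPromptVersionsMessagesToolMessage(
    role="tool",
    content='{"temperature_c": 21}',  # string form of the content union
    tool_call_id="call_abc123",       # placeholder id of the call being answered
    cache_control=ListPromptVersionsMessagesCacheControl(type="ephemeral", ttl="5m"),
)
print(tool_msg.model_dump())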
1537
+ ListPromptVersionsContentPromptsResponse2TypedDict = TypeAliasType(
1538
+ "ListPromptVersionsContentPromptsResponse2TypedDict",
1539
+ Union[
1540
+ RefusalPartSchemaTypedDict,
1541
+ RedactedReasoningPartSchemaTypedDict,
1542
+ TextContentPartSchemaTypedDict,
1543
+ ReasoningPartSchemaTypedDict,
1544
+ ],
1545
+ )
1546
+
1547
+
1548
+ ListPromptVersionsContentPromptsResponse2 = Annotated[
1549
+ Union[
1550
+ Annotated[TextContentPartSchema, Tag("text")],
1551
+ Annotated[RefusalPartSchema, Tag("refusal")],
1552
+ Annotated[ReasoningPartSchema, Tag("reasoning")],
1553
+ Annotated[RedactedReasoningPartSchema, Tag("redacted_reasoning")],
1554
+ ],
1555
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1556
+ ]
1557
+
1558
+
1559
+ ListPromptVersionsMessagesPromptsResponseContentTypedDict = TypeAliasType(
1560
+ "ListPromptVersionsMessagesPromptsResponseContentTypedDict",
1561
+ Union[str, List[ListPromptVersionsContentPromptsResponse2TypedDict]],
1562
+ )
1563
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
1564
+
1565
+
1566
+ ListPromptVersionsMessagesPromptsResponseContent = TypeAliasType(
1567
+ "ListPromptVersionsMessagesPromptsResponseContent",
1568
+ Union[str, List[ListPromptVersionsContentPromptsResponse2]],
1569
+ )
1570
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
1571
+
1572
+
1573
+ ListPromptVersionsMessagesPromptsResponseRole = Literal["assistant",]
1574
+ r"""The role of the messages author, in this case `assistant`."""
1575
+
1576
+
1577
+ class ListPromptVersionsMessagesAudioTypedDict(TypedDict):
1578
+ r"""Data about a previous audio response from the model."""
1579
+
1580
+ id: str
1581
+ r"""Unique identifier for a previous audio response from the model."""
1582
+
1583
+
1584
+ class ListPromptVersionsMessagesAudio(BaseModel):
1585
+ r"""Data about a previous audio response from the model."""
1586
+
1587
+ id: str
1588
+ r"""Unique identifier for a previous audio response from the model."""
1589
+
1590
+
1591
+ ListPromptVersionsMessagesType = Literal["function",]
1592
+ r"""The type of the tool. Currently, only `function` is supported."""
1593
+
1594
+
1595
+ class ListPromptVersionsMessagesFunctionTypedDict(TypedDict):
1596
+ name: NotRequired[str]
1597
+ r"""The name of the function to call."""
1598
+ arguments: NotRequired[str]
1599
+ r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
1600
+
1601
+
1602
+ class ListPromptVersionsMessagesFunction(BaseModel):
1603
+ name: Optional[str] = None
1604
+ r"""The name of the function to call."""
1605
+
1606
+ arguments: Optional[str] = None
1607
+ r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
1608
+
1609
+ @model_serializer(mode="wrap")
1610
+ def serialize_model(self, handler):
1611
+ optional_fields = set(["name", "arguments"])
1612
+ serialized = handler(self)
1613
+ m = {}
1614
+
1615
+ for n, f in type(self).model_fields.items():
1616
+ k = f.alias or n
1617
+ val = serialized.get(k)
1618
+
1619
+ if val != UNSET_SENTINEL:
1620
+ if val is not None or k not in optional_fields:
1621
+ m[k] = val
1622
+
1623
+ return m
1624
+
1625
+
1626
+ class ListPromptVersionsMessagesToolCallsTypedDict(TypedDict):
1627
+ id: str
1628
+ r"""The ID of the tool call."""
1629
+ type: ListPromptVersionsMessagesType
1630
+ r"""The type of the tool. Currently, only `function` is supported."""
1631
+ function: ListPromptVersionsMessagesFunctionTypedDict
1632
+ thought_signature: NotRequired[str]
1633
+ r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
1634
+
1635
+
1636
+ class ListPromptVersionsMessagesToolCalls(BaseModel):
1637
+ id: str
1638
+ r"""The ID of the tool call."""
1639
+
1640
+ type: ListPromptVersionsMessagesType
1641
+ r"""The type of the tool. Currently, only `function` is supported."""
1642
+
1643
+ function: ListPromptVersionsMessagesFunction
1644
+
1645
+ thought_signature: Optional[str] = None
1646
+ r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
1647
+
1648
+ @model_serializer(mode="wrap")
1649
+ def serialize_model(self, handler):
1650
+ optional_fields = set(["thought_signature"])
1651
+ serialized = handler(self)
1652
+ m = {}
1653
+
1654
+ for n, f in type(self).model_fields.items():
1655
+ k = f.alias or n
1656
+ val = serialized.get(k)
1657
+
1658
+ if val != UNSET_SENTINEL:
1659
+ if val is not None or k not in optional_fields:
1660
+ m[k] = val
1661
+
1662
+ return m
1663
+
1664
+
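The function arguments above arrive as a JSON-encoded string generated by the model, and the docstring warns they may not be valid JSON. A hedged sketch, with placeholder ids and a hypothetical get_weather function name, assuming the models are importable from orq_ai_sdk.models:

import json

from orq_ai_sdk.models import (
    ListPromptVersionsMessagesFunction,
    ListPromptVersionsMessagesToolCalls,
)

call = ListPromptVersionsMessagesToolCalls(
    id="call_abc123",  # placeholder tool-call id
    type="function",
    function=ListPromptVersionsMessagesFunction(
        name="get_weather",
        arguments='{"city": "Amsterdam"}',  # JSON string produced by the model
    ),
)

# Per the docstring, validate the arguments before calling your function.
try:
    args = json.loads(call.function.arguments or "{}")
except json.JSONDecodeError:
    args = {}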
1665
+ class ListPromptVersionsMessagesAssistantMessageTypedDict(TypedDict):
1666
+ role: ListPromptVersionsMessagesPromptsResponseRole
1667
+ r"""The role of the messages author, in this case `assistant`."""
1668
+ content: NotRequired[
1669
+ Nullable[ListPromptVersionsMessagesPromptsResponseContentTypedDict]
1670
+ ]
1671
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
1672
+ refusal: NotRequired[Nullable[str]]
1673
+ r"""The refusal message by the assistant."""
1674
+ name: NotRequired[str]
1675
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1676
+ audio: NotRequired[Nullable[ListPromptVersionsMessagesAudioTypedDict]]
1677
+ r"""Data about a previous audio response from the model."""
1678
+ tool_calls: NotRequired[List[ListPromptVersionsMessagesToolCallsTypedDict]]
1679
+ r"""The tool calls generated by the model, such as function calls."""
1680
+
1681
+
1682
+ class ListPromptVersionsMessagesAssistantMessage(BaseModel):
1683
+ role: ListPromptVersionsMessagesPromptsResponseRole
1684
+ r"""The role of the messages author, in this case `assistant`."""
1685
+
1686
+ content: OptionalNullable[ListPromptVersionsMessagesPromptsResponseContent] = UNSET
1687
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
1688
+
1689
+ refusal: OptionalNullable[str] = UNSET
1690
+ r"""The refusal message by the assistant."""
1691
+
1692
+ name: Optional[str] = None
1693
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1694
+
1695
+ audio: OptionalNullable[ListPromptVersionsMessagesAudio] = UNSET
1696
+ r"""Data about a previous audio response from the model."""
1697
+
1698
+ tool_calls: Optional[List[ListPromptVersionsMessagesToolCalls]] = None
1699
+ r"""The tool calls generated by the model, such as function calls."""
1700
+
1701
+ @model_serializer(mode="wrap")
1702
+ def serialize_model(self, handler):
1703
+ optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
1704
+ nullable_fields = set(["content", "refusal", "audio"])
1705
+ serialized = handler(self)
1706
+ m = {}
1707
+
1708
+ for n, f in type(self).model_fields.items():
1709
+ k = f.alias or n
1710
+ val = serialized.get(k)
1711
+ is_nullable_and_explicitly_set = (
1712
+ k in nullable_fields
1713
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
1714
+ )
1715
+
1716
+ if val != UNSET_SENTINEL:
1717
+ if (
1718
+ val is not None
1719
+ or k not in optional_fields
1720
+ or is_nullable_and_explicitly_set
1721
+ ):
1722
+ m[k] = val
1723
+
1724
+ return m
1725
+
1726
+
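A sketch of the assistant message above, assuming the classes are importable from orq_ai_sdk.models. The content field is omitted because tool_calls is supplied, which the docstring allows; the ids and function name are placeholders.

from orq_ai_sdk.models import (
    ListPromptVersionsMessagesAssistantMessage,
    ListPromptVersionsMessagesFunction,
    ListPromptVersionsMessagesToolCalls,
)

assistant_msg = ListPromptVersionsMessagesAssistantMessage(
    role="assistant",
    tool_calls=[
        ListPromptVersionsMessagesToolCalls(
            id="call_abc123",
            type="function",
            function=ListPromptVersionsMessagesFunction(
                name="get_weather", arguments='{"city": "Amsterdam"}'
            ),
        )
    ],
)

# Unset nullable fields (content, refusal, audio) are dropped by the custom
# serializer, so only role and tool_calls should appear in the dump.
print(assistant_msg.model_dump())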
1727
+ ListPromptVersionsMessagesPromptsRole = Literal["user",]
1728
+ r"""The role of the messages author, in this case `user`."""
1729
+
1730
+
1731
+ ListPromptVersions2PromptsResponse200Type = Literal["file",]
1732
+ r"""The type of the content part. Always `file`."""
1733
+
1734
+
1735
+ ListPromptVersions2PromptsResponse200ApplicationJSONType = Literal["ephemeral",]
1736
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1737
+
1738
+
1739
+ ListPromptVersions2TTL = Literal[
1740
+ "5m",
1741
+ "1h",
1742
+ ]
1743
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1744
+
1745
+ - `5m`: 5 minutes
1746
+ - `1h`: 1 hour
1747
+
1748
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1749
+ """
1750
+
1751
+
1752
+ class ListPromptVersions2CacheControlTypedDict(TypedDict):
1753
+ type: ListPromptVersions2PromptsResponse200ApplicationJSONType
1754
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1755
+ ttl: NotRequired[ListPromptVersions2TTL]
1756
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1757
+
1758
+ - `5m`: 5 minutes
1759
+ - `1h`: 1 hour
1760
+
1761
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1762
+ """
1763
+
1764
+
1765
+ class ListPromptVersions2CacheControl(BaseModel):
1766
+ type: ListPromptVersions2PromptsResponse200ApplicationJSONType
1767
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1768
+
1769
+ ttl: Optional[ListPromptVersions2TTL] = "5m"
1770
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1771
+
1772
+ - `5m`: 5 minutes
1773
+ - `1h`: 1 hour
1774
+
1775
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1776
+ """
1777
+
1778
+ @model_serializer(mode="wrap")
1779
+ def serialize_model(self, handler):
1780
+ optional_fields = set(["ttl"])
1781
+ serialized = handler(self)
1782
+ m = {}
1783
+
1784
+ for n, f in type(self).model_fields.items():
1785
+ k = f.alias or n
1786
+ val = serialized.get(k)
1787
+
1788
+ if val != UNSET_SENTINEL:
1789
+ if val is not None or k not in optional_fields:
1790
+ m[k] = val
1791
+
1792
+ return m
1793
+
1794
+
1795
+ class ListPromptVersions24TypedDict(TypedDict):
1796
+ type: ListPromptVersions2PromptsResponse200Type
1797
+ r"""The type of the content part. Always `file`."""
1798
+ file: FileContentPartSchemaTypedDict
1799
+ r"""File data for the content part. Must contain either file_data or uri, but not both."""
1800
+ cache_control: NotRequired[ListPromptVersions2CacheControlTypedDict]
1801
+
1802
+
1803
+ class ListPromptVersions24(BaseModel):
1804
+ type: ListPromptVersions2PromptsResponse200Type
1805
+ r"""The type of the content part. Always `file`."""
1806
+
1807
+ file: FileContentPartSchema
1808
+ r"""File data for the content part. Must contain either file_data or uri, but not both."""
1809
+
1810
+ cache_control: Optional[ListPromptVersions2CacheControl] = None
1811
+
1812
+ @model_serializer(mode="wrap")
1813
+ def serialize_model(self, handler):
1814
+ optional_fields = set(["cache_control"])
1815
+ serialized = handler(self)
1816
+ m = {}
1817
+
1818
+ for n, f in type(self).model_fields.items():
1819
+ k = f.alias or n
1820
+ val = serialized.get(k)
1821
+
1822
+ if val != UNSET_SENTINEL:
1823
+ if val is not None or k not in optional_fields:
1824
+ m[k] = val
1825
+
1826
+ return m
1827
+
1828
+
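A hedged sketch of the "file" content part above. The nested payload assumes FileContentPartSchema accepts a "uri" key, per its docstring; the schema itself is defined outside this hunk, and the import path is an assumption.

from orq_ai_sdk.models import ListPromptVersions24

file_part = ListPromptVersions24.model_validate(
    {
        "type": "file",
        "file": {"uri": "https://example.com/report.pdf"},  # placeholder document
    }
)
print(file_part.model_dump())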
1829
+ ListPromptVersionsContentPrompts2TypedDict = TypeAliasType(
1830
+ "ListPromptVersionsContentPrompts2TypedDict",
1831
+ Union[
1832
+ AudioContentPartSchemaTypedDict,
1833
+ TextContentPartSchemaTypedDict,
1834
+ ImageContentPartSchemaTypedDict,
1835
+ ListPromptVersions24TypedDict,
1836
+ ],
1837
+ )
1838
+
1839
+
1840
+ ListPromptVersionsContentPrompts2 = Annotated[
1841
+ Union[
1842
+ Annotated[TextContentPartSchema, Tag("text")],
1843
+ Annotated[ImageContentPartSchema, Tag("image_url")],
1844
+ Annotated[AudioContentPartSchema, Tag("input_audio")],
1845
+ Annotated[ListPromptVersions24, Tag("file")],
1846
+ ],
1847
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1848
+ ]
1849
+
1850
+
1851
+ ListPromptVersionsMessagesPromptsContentTypedDict = TypeAliasType(
1852
+ "ListPromptVersionsMessagesPromptsContentTypedDict",
1853
+ Union[str, List[ListPromptVersionsContentPrompts2TypedDict]],
1854
+ )
1855
+ r"""The contents of the user message."""
1856
+
1857
+
1858
+ ListPromptVersionsMessagesPromptsContent = TypeAliasType(
1859
+ "ListPromptVersionsMessagesPromptsContent",
1860
+ Union[str, List[ListPromptVersionsContentPrompts2]],
1861
+ )
1862
+ r"""The contents of the user message."""
1863
+
1864
+
1865
+ class ListPromptVersionsMessagesUserMessageTypedDict(TypedDict):
1866
+ role: ListPromptVersionsMessagesPromptsRole
1867
+ r"""The role of the messages author, in this case `user`."""
1868
+ content: ListPromptVersionsMessagesPromptsContentTypedDict
1869
+ r"""The contents of the user message."""
1870
+ name: NotRequired[str]
1871
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1872
+
1873
+
1874
+ class ListPromptVersionsMessagesUserMessage(BaseModel):
1875
+ role: ListPromptVersionsMessagesPromptsRole
1876
+ r"""The role of the messages author, in this case `user`."""
1877
+
1878
+ content: ListPromptVersionsMessagesPromptsContent
1879
+ r"""The contents of the user message."""
1880
+
1881
+ name: Optional[str] = None
1882
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1883
+
1884
+ @model_serializer(mode="wrap")
1885
+ def serialize_model(self, handler):
1886
+ optional_fields = set(["name"])
1887
+ serialized = handler(self)
1888
+ m = {}
1889
+
1890
+ for n, f in type(self).model_fields.items():
1891
+ k = f.alias or n
1892
+ val = serialized.get(k)
1893
+
1894
+ if val != UNSET_SENTINEL:
1895
+ if val is not None or k not in optional_fields:
1896
+ m[k] = val
1897
+
1898
+ return m
1899
+
1900
+
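A sketch of the user message above, assuming the class is importable from orq_ai_sdk.models. Content may be a plain string or a list of typed parts; the part shape below assumes TextContentPartSchema follows the usual {"type": "text", "text": ...} layout, which is defined elsewhere in this module.

from orq_ai_sdk.models import ListPromptVersionsMessagesUserMessage

# Plain-string form of the content union.
user_msg = ListPromptVersionsMessagesUserMessage(role="user", content="Hello!")

# List form, validated from a raw dict.
multi_part = ListPromptVersionsMessagesUserMessage.model_validate(
    {
        "role": "user",
        "content": [{"type": "text", "text": "Describe the attached file."}],
    }
)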
1901
+ ListPromptVersionsMessagesRole = Literal["system",]
1902
+ r"""The role of the messages author, in this case `system`."""
1903
+
1904
+
1905
+ ListPromptVersionsMessagesContentTypedDict = TypeAliasType(
1906
+ "ListPromptVersionsMessagesContentTypedDict",
1907
+ Union[str, List[TextContentPartSchemaTypedDict]],
1908
+ )
1909
+ r"""The contents of the system message."""
1910
+
1911
+
1912
+ ListPromptVersionsMessagesContent = TypeAliasType(
1913
+ "ListPromptVersionsMessagesContent", Union[str, List[TextContentPartSchema]]
1914
+ )
1915
+ r"""The contents of the system message."""
1916
+
1917
+
1918
+ class ListPromptVersionsMessagesSystemMessageTypedDict(TypedDict):
1919
+ r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
1920
+
1921
+ role: ListPromptVersionsMessagesRole
1922
+ r"""The role of the messages author, in this case `system`."""
1923
+ content: ListPromptVersionsMessagesContentTypedDict
1924
+ r"""The contents of the system message."""
1925
+ name: NotRequired[str]
1926
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1927
+
1928
+
1929
+ class ListPromptVersionsMessagesSystemMessage(BaseModel):
1930
+ r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
1931
+
1932
+ role: ListPromptVersionsMessagesRole
1933
+ r"""The role of the messages author, in this case `system`."""
1934
+
1935
+ content: ListPromptVersionsMessagesContent
1936
+ r"""The contents of the system message."""
1937
+
1938
+ name: Optional[str] = None
1939
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1940
+
1941
+ @model_serializer(mode="wrap")
1942
+ def serialize_model(self, handler):
1943
+ optional_fields = set(["name"])
1944
+ serialized = handler(self)
1945
+ m = {}
1946
+
1947
+ for n, f in type(self).model_fields.items():
1948
+ k = f.alias or n
1949
+ val = serialized.get(k)
1950
+
1951
+ if val != UNSET_SENTINEL:
1952
+ if val is not None or k not in optional_fields:
1953
+ m[k] = val
1954
+
1955
+ return m
1956
+
1957
+
1958
+ ListPromptVersionsPromptsMessagesTypedDict = TypeAliasType(
1959
+ "ListPromptVersionsPromptsMessagesTypedDict",
1960
+ Union[
1961
+ ListPromptVersionsMessagesSystemMessageTypedDict,
1962
+ ListPromptVersionsMessagesUserMessageTypedDict,
1963
+ ListPromptVersionsMessagesToolMessageTypedDict,
1964
+ ListPromptVersionsMessagesAssistantMessageTypedDict,
1965
+ ],
1966
+ )
1967
+
1968
+
1969
+ ListPromptVersionsPromptsMessages = Annotated[
1970
+ Union[
1971
+ Annotated[ListPromptVersionsMessagesSystemMessage, Tag("system")],
1972
+ Annotated[ListPromptVersionsMessagesUserMessage, Tag("user")],
1973
+ Annotated[ListPromptVersionsMessagesAssistantMessage, Tag("assistant")],
1974
+ Annotated[ListPromptVersionsMessagesToolMessage, Tag("tool")],
1975
+ ],
1976
+ Discriminator(lambda m: get_discriminator(m, "role", "role")),
1977
+ ]
1978
+
1979
+
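The annotated union above discriminates on the "role" key. A small sketch of how a raw dict is routed to the matching message class, assuming the alias is importable from orq_ai_sdk.models:

from pydantic import TypeAdapter

from orq_ai_sdk.models import ListPromptVersionsPromptsMessages

adapter = TypeAdapter(ListPromptVersionsPromptsMessages)
msg = adapter.validate_python(
    {"role": "system", "content": "You are a helpful assistant."}
)
print(type(msg).__name__)  # ListPromptVersionsMessagesSystemMessage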
1980
+ class ListPromptVersionsPromptFieldTypedDict(TypedDict):
1981
+ r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
1982
+
1983
+ audio: NotRequired[Nullable[ListPromptVersionsAudioTypedDict]]
1984
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
1985
+ frequency_penalty: NotRequired[Nullable[float]]
1986
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
1987
+ max_tokens: NotRequired[Nullable[int]]
1988
+ r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
1989
+
1990
+ This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
1991
+ """
1992
+ max_completion_tokens: NotRequired[Nullable[int]]
1993
+ r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
1994
+ logprobs: NotRequired[Nullable[bool]]
1995
+ r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
1996
+ top_logprobs: NotRequired[Nullable[int]]
1997
+ r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
1998
+ n: NotRequired[Nullable[int]]
1999
+ r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
2000
+ presence_penalty: NotRequired[Nullable[float]]
2001
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
2002
+ response_format: NotRequired[ListPromptVersionsResponseFormatTypedDict]
2003
+ r"""An object specifying the format that the model must output"""
2004
+ reasoning_effort: NotRequired[ListPromptVersionsReasoningEffort]
2005
+ r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
2006
+
2007
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
2008
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
2009
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
2010
+ - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
2011
+
2012
+ Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
2013
+ """
2014
+ verbosity: NotRequired[str]
2015
+ r"""Adjusts response verbosity. Lower levels yield shorter answers."""
2016
+ seed: NotRequired[Nullable[float]]
2017
+ r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
2018
+ stop: NotRequired[Nullable[ListPromptVersionsStopTypedDict]]
2019
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
2020
+ stream_options: NotRequired[Nullable[ListPromptVersionsStreamOptionsTypedDict]]
2021
+ r"""Options for streaming response. Only set this when you set stream: true."""
2022
+ thinking: NotRequired[ListPromptVersionsThinkingTypedDict]
2023
+ temperature: NotRequired[Nullable[float]]
2024
+ r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
2025
+ top_p: NotRequired[Nullable[float]]
2026
+ r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
2027
+ top_k: NotRequired[Nullable[float]]
2028
+ r"""Limits the model to consider only the top k most likely tokens at each step."""
2029
+ tool_choice: NotRequired[ListPromptVersionsToolChoiceTypedDict]
2030
+ r"""Controls which (if any) tool is called by the model."""
2031
+ parallel_tool_calls: NotRequired[bool]
2032
+ r"""Whether to enable parallel function calling during tool use."""
2033
+ modalities: NotRequired[Nullable[List[ListPromptVersionsModalities]]]
2034
+ r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
2035
+ guardrails: NotRequired[List[ListPromptVersionsGuardrailsTypedDict]]
2036
+ r"""A list of guardrails to apply to the request."""
2037
+ fallbacks: NotRequired[List[ListPromptVersionsFallbacksTypedDict]]
2038
+ r"""Array of fallback models to use if primary model fails"""
2039
+ retry: NotRequired[ListPromptVersionsRetryTypedDict]
2040
+ r"""Retry configuration for the request"""
2041
+ cache: NotRequired[ListPromptVersionsCacheTypedDict]
2042
+ r"""Cache configuration for the request."""
2043
+ load_balancer: NotRequired[ListPromptVersionsLoadBalancerTypedDict]
2044
+ r"""Load balancer configuration for the request."""
2045
+ timeout: NotRequired[ListPromptVersionsTimeoutTypedDict]
2046
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
2047
+ messages: NotRequired[List[ListPromptVersionsPromptsMessagesTypedDict]]
2048
+ r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
2049
+ model: NotRequired[Nullable[str]]
2050
+ r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
2051
+ version: NotRequired[str]
2052
+
2053
+
2054
+ class ListPromptVersionsPromptField(BaseModel):
2055
+ r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
2056
+
2057
+ audio: OptionalNullable[ListPromptVersionsAudio] = UNSET
2058
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
2059
+
2060
+ frequency_penalty: OptionalNullable[float] = UNSET
2061
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
2062
+
2063
+ max_tokens: OptionalNullable[int] = UNSET
2064
+ r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
2065
+
2066
+ This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
2067
+ """
2068
+
2069
+ max_completion_tokens: OptionalNullable[int] = UNSET
2070
+ r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
2071
+
2072
+ logprobs: OptionalNullable[bool] = UNSET
2073
+ r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
2074
+
2075
+ top_logprobs: OptionalNullable[int] = UNSET
2076
+ r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
2077
+
2078
+ n: OptionalNullable[int] = UNSET
2079
+ r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
2080
+
2081
+ presence_penalty: OptionalNullable[float] = UNSET
2082
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
2083
+
2084
+ response_format: Optional[ListPromptVersionsResponseFormat] = None
2085
+ r"""An object specifying the format that the model must output"""
2086
+
2087
+ reasoning_effort: Optional[ListPromptVersionsReasoningEffort] = None
2088
+ r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
2089
+
2090
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
2091
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
2092
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
2093
+ - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
2094
+
2095
+ Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
2096
+ """
2097
+
2098
+ verbosity: Optional[str] = None
2099
+ r"""Adjusts response verbosity. Lower levels yield shorter answers."""
2100
+
2101
+ seed: OptionalNullable[float] = UNSET
2102
+ r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
2103
+
2104
+ stop: OptionalNullable[ListPromptVersionsStop] = UNSET
2105
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
2106
+
2107
+ stream_options: OptionalNullable[ListPromptVersionsStreamOptions] = UNSET
2108
+ r"""Options for streaming response. Only set this when you set stream: true."""
2109
+
2110
+ thinking: Optional[ListPromptVersionsThinking] = None
2111
+
2112
+ temperature: OptionalNullable[float] = UNSET
2113
+ r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
2114
+
2115
+ top_p: OptionalNullable[float] = UNSET
2116
+ r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
2117
+
2118
+ top_k: OptionalNullable[float] = UNSET
2119
+ r"""Limits the model to consider only the top k most likely tokens at each step."""
2120
+
2121
+ tool_choice: Optional[ListPromptVersionsToolChoice] = None
2122
+ r"""Controls which (if any) tool is called by the model."""
2123
+
2124
+ parallel_tool_calls: Optional[bool] = None
2125
+ r"""Whether to enable parallel function calling during tool use."""
2126
+
2127
+ modalities: OptionalNullable[List[ListPromptVersionsModalities]] = UNSET
2128
+ r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
2129
+
2130
+ guardrails: Optional[List[ListPromptVersionsGuardrails]] = None
2131
+ r"""A list of guardrails to apply to the request."""
2132
+
2133
+ fallbacks: Optional[List[ListPromptVersionsFallbacks]] = None
2134
+ r"""Array of fallback models to use if primary model fails"""
2135
+
2136
+ retry: Optional[ListPromptVersionsRetry] = None
2137
+ r"""Retry configuration for the request"""
2138
+
2139
+ cache: Optional[ListPromptVersionsCache] = None
2140
+ r"""Cache configuration for the request."""
2141
+
2142
+ load_balancer: Optional[ListPromptVersionsLoadBalancer] = None
2143
+ r"""Load balancer configuration for the request."""
2144
+
2145
+ timeout: Optional[ListPromptVersionsTimeout] = None
2146
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
2147
+
2148
+ messages: Optional[List[ListPromptVersionsPromptsMessages]] = None
2149
+ r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
2150
+
2151
+ model: OptionalNullable[str] = UNSET
2152
+ r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
2153
+
2154
+ version: Optional[str] = None
2155
+
2156
+ @model_serializer(mode="wrap")
2157
+ def serialize_model(self, handler):
2158
+ optional_fields = set(
2159
+ [
2160
+ "audio",
2161
+ "frequency_penalty",
2162
+ "max_tokens",
2163
+ "max_completion_tokens",
2164
+ "logprobs",
2165
+ "top_logprobs",
2166
+ "n",
2167
+ "presence_penalty",
2168
+ "response_format",
2169
+ "reasoning_effort",
2170
+ "verbosity",
2171
+ "seed",
2172
+ "stop",
2173
+ "stream_options",
2174
+ "thinking",
2175
+ "temperature",
2176
+ "top_p",
2177
+ "top_k",
2178
+ "tool_choice",
2179
+ "parallel_tool_calls",
2180
+ "modalities",
2181
+ "guardrails",
2182
+ "fallbacks",
2183
+ "retry",
2184
+ "cache",
2185
+ "load_balancer",
2186
+ "timeout",
2187
+ "messages",
2188
+ "model",
2189
+ "version",
2190
+ ]
2191
+ )
2192
+ nullable_fields = set(
2193
+ [
2194
+ "audio",
2195
+ "frequency_penalty",
2196
+ "max_tokens",
2197
+ "max_completion_tokens",
2198
+ "logprobs",
2199
+ "top_logprobs",
2200
+ "n",
2201
+ "presence_penalty",
2202
+ "seed",
2203
+ "stop",
2204
+ "stream_options",
2205
+ "temperature",
2206
+ "top_p",
2207
+ "top_k",
2208
+ "modalities",
2209
+ "model",
2210
+ ]
2211
+ )
2212
+ serialized = handler(self)
2213
+ m = {}
2214
+
2215
+ for n, f in type(self).model_fields.items():
2216
+ k = f.alias or n
2217
+ val = serialized.get(k)
2218
+ is_nullable_and_explicitly_set = (
2219
+ k in nullable_fields
2220
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
2221
+ )
2222
+
2223
+ if val != UNSET_SENTINEL:
2224
+ if (
2225
+ val is not None
2226
+ or k not in optional_fields
2227
+ or is_nullable_and_explicitly_set
2228
+ ):
2229
+ m[k] = val
2230
+
2231
+ return m
2232
+
2233
+
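A sketch of the prompt configuration model above, assuming the classes are importable from orq_ai_sdk.models; the model id and message texts are placeholders. Only a few of the many optional fields are set.

from orq_ai_sdk.models import (
    ListPromptVersionsMessagesSystemMessage,
    ListPromptVersionsMessagesUserMessage,
    ListPromptVersionsPromptField,
)

prompt = ListPromptVersionsPromptField(
    model="openai/gpt-4o",          # model id format documented on the field above
    temperature=0.2,
    max_completion_tokens=512,
    messages=[
        ListPromptVersionsMessagesSystemMessage(
            role="system", content="You are a terse assistant."
        ),
        ListPromptVersionsMessagesUserMessage(role="user", content="Summarise RFC 2119."),
    ],
)

# Fields left at UNSET are dropped by the serializer, so the dump only
# carries the values set above.
print(prompt.model_dump())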
2234
+ ListPromptVersionsUseCases = Literal[
2235
+ "Agents simulations",
2236
+ "Agents",
2237
+ "API interaction",
2238
+ "Autonomous Agents",
2239
+ "Chatbots",
2240
+ "Classification",
2241
+ "Code understanding",
2242
+ "Code writing",
2243
+ "Conversation",
2244
+ "Documents QA",
2245
+ "Evaluation",
2246
+ "Extraction",
2247
+ "Multi-modal",
2248
+ "Self-checking",
2249
+ "Sentiment analysis",
2250
+ "SQL",
2251
+ "Summarization",
2252
+ "Tagging",
2253
+ "Translation (document)",
2254
+ "Translation (sentences)",
2255
+ ]
2256
+
2257
+
2258
+ ListPromptVersionsLanguage = Literal[
2259
+ "Chinese",
2260
+ "Dutch",
2261
+ "English",
2262
+ "French",
2263
+ "German",
2264
+ "Russian",
2265
+ "Spanish",
2266
+ ]
2267
+ r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
2268
+
2269
+
2270
+ class ListPromptVersionsMetadataTypedDict(TypedDict):
2271
+ use_cases: NotRequired[List[ListPromptVersionsUseCases]]
2272
+ r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
2273
+ language: NotRequired[Nullable[ListPromptVersionsLanguage]]
2274
+ r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
2275
+
2276
+
2277
+ class ListPromptVersionsMetadata(BaseModel):
2278
+ use_cases: Optional[List[ListPromptVersionsUseCases]] = None
2279
+ r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
2280
+
2281
+ language: OptionalNullable[ListPromptVersionsLanguage] = UNSET
2282
+ r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
2283
+
2284
+ @model_serializer(mode="wrap")
2285
+ def serialize_model(self, handler):
2286
+ optional_fields = set(["use_cases", "language"])
2287
+ nullable_fields = set(["language"])
2288
+ serialized = handler(self)
2289
+ m = {}
2290
+
2291
+ for n, f in type(self).model_fields.items():
2292
+ k = f.alias or n
2293
+ val = serialized.get(k)
2294
+ is_nullable_and_explicitly_set = (
2295
+ k in nullable_fields
2296
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
2297
+ )
2298
+
2299
+ if val != UNSET_SENTINEL:
2300
+ if (
2301
+ val is not None
2302
+ or k not in optional_fields
2303
+ or is_nullable_and_explicitly_set
2304
+ ):
2305
+ m[k] = val
2306
+
2307
+ return m
2308
+
2309
+
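A sketch of the metadata model above, assuming it is importable from orq_ai_sdk.models. It also illustrates the optional-versus-nullable distinction that the serializer enforces.

from orq_ai_sdk.models import ListPromptVersionsMetadata

meta = ListPromptVersionsMetadata(use_cases=["Summarization", "Chatbots"], language="English")
print(meta.model_dump())  # both keys survive

# Setting language=None explicitly keeps it in the output as null, because the
# serializer treats nullable fields set by the caller as intentional; leaving
# language unset drops the key entirely.
print(ListPromptVersionsMetadata(use_cases=["Tagging"], language=None).model_dump())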
2310
+ class ListPromptVersionsDataTypedDict(TypedDict):
2311
+ id: str
2312
+ prompt: ListPromptVersionsPromptFieldTypedDict
2313
+ r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
2314
+ timestamp: str
2315
+ created_by_id: NotRequired[Nullable[str]]
2316
+ updated_by_id: NotRequired[Nullable[str]]
2317
+ description: NotRequired[Nullable[str]]
2318
+ r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
2319
+ prompt_config: NotRequired[ListPromptVersionsPromptConfigTypedDict]
2320
+ r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
2321
+ metadata: NotRequired[ListPromptVersionsMetadataTypedDict]
2322
+
2323
+
2324
+ class ListPromptVersionsData(BaseModel):
2325
+ id: Annotated[str, pydantic.Field(alias="_id")]
2326
+
2327
+ prompt: ListPromptVersionsPromptField
2328
+ r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
2329
+
2330
+ timestamp: str
2331
+
2332
+ created_by_id: OptionalNullable[str] = UNSET
2333
+
2334
+ updated_by_id: OptionalNullable[str] = UNSET
2335
+
2336
+ description: OptionalNullable[str] = UNSET
2337
+ r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
2338
+
2339
+ prompt_config: Annotated[
2340
+ Optional[ListPromptVersionsPromptConfig],
2341
+ pydantic.Field(
2342
+ deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
2343
+ ),
2344
+ ] = None
2345
+ r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
2346
+
2347
+ metadata: Optional[ListPromptVersionsMetadata] = None
2348
+
2349
+ @model_serializer(mode="wrap")
2350
+ def serialize_model(self, handler):
2351
+ optional_fields = set(
2352
+ [
2353
+ "created_by_id",
2354
+ "updated_by_id",
2355
+ "description",
2356
+ "prompt_config",
2357
+ "metadata",
2358
+ ]
2359
+ )
2360
+ nullable_fields = set(["created_by_id", "updated_by_id", "description"])
2361
+ serialized = handler(self)
892
2362
  m = {}
893
2363
 
894
2364
  for n, f in type(self).model_fields.items():
895
2365
  k = f.alias or n
896
2366
  val = serialized.get(k)
897
- serialized.pop(k, None)
898
-
899
- optional_nullable = k in optional_fields and k in nullable_fields
900
- is_set = (
901
- self.__pydantic_fields_set__.intersection({n})
902
- or k in null_default_fields
903
- ) # pylint: disable=no-member
904
-
905
- if val is not None and val != UNSET_SENTINEL:
906
- m[k] = val
907
- elif val != UNSET_SENTINEL and (
908
- not k in optional_fields or (optional_nullable and is_set)
909
- ):
910
- m[k] = val
2367
+ is_nullable_and_explicitly_set = (
2368
+ k in nullable_fields
2369
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
2370
+ )
2371
+
2372
+ if val != UNSET_SENTINEL:
2373
+ if (
2374
+ val is not None
2375
+ or k not in optional_fields
2376
+ or is_nullable_and_explicitly_set
2377
+ ):
2378
+ m[k] = val
911
2379
 
912
2380
  return m
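This hunk replaces the old serializer's null_default_fields bookkeeping with an explicit "was this nullable field set" check. A sketch of the observable behaviour, assuming ListPromptVersionsData is importable from orq_ai_sdk.models; the id, timestamp, and prompt payload are placeholders.

from orq_ai_sdk.models import ListPromptVersionsData

data = ListPromptVersionsData.model_validate(
    {
        "_id": "prompt_version_123",            # placeholder id (aliased to "id")
        "timestamp": "2024-01-01T00:00:00Z",
        "prompt": {"model": "openai/gpt-4o", "messages": []},
        "description": None,                    # nullable field set explicitly
    }
)

dumped = data.model_dump(by_alias=True)
# "description" survives as None because it was explicitly provided, while
# the unset nullable fields (created_by_id, updated_by_id) are dropped.
assert "description" in dumped and "created_by_id" not in dumped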
913
2381