orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their respective public registries. The information is provided for informational purposes only.
Files changed (167)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/models/getonepromptop.py

@@ -1,6 +1,30 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .audiocontentpartschema import (
+    AudioContentPartSchema,
+    AudioContentPartSchemaTypedDict,
+)
+from .filecontentpartschema import FileContentPartSchema, FileContentPartSchemaTypedDict
+from .imagecontentpartschema import (
+    ImageContentPartSchema,
+    ImageContentPartSchemaTypedDict,
+)
+from .reasoningpartschema import ReasoningPartSchema, ReasoningPartSchemaTypedDict
+from .redactedreasoningpartschema import (
+    RedactedReasoningPartSchema,
+    RedactedReasoningPartSchemaTypedDict,
+)
+from .refusalpartschema import RefusalPartSchema, RefusalPartSchemaTypedDict
+from .textcontentpartschema import TextContentPartSchema, TextContentPartSchemaTypedDict
+from .thinkingconfigdisabledschema import (
+    ThinkingConfigDisabledSchema,
+    ThinkingConfigDisabledSchemaTypedDict,
+)
+from .thinkingconfigenabledschema import (
+    ThinkingConfigEnabledSchema,
+    ThinkingConfigEnabledSchemaTypedDict,
+)
 from orq_ai_sdk.types import (
     BaseModel,
     Nullable,
@@ -12,7 +36,13 @@ from orq_ai_sdk.utils import FieldMetadata, PathParamMetadata, get_discriminator
 import pydantic
 from pydantic import Discriminator, Tag, model_serializer
 from typing import Any, Dict, List, Literal, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+from typing_extensions import (
+    Annotated,
+    NotRequired,
+    TypeAliasType,
+    TypedDict,
+    deprecated,
+)
 
 
 class GetOnePromptRequestTypedDict(TypedDict):
@@ -38,6 +68,7 @@ GetOnePromptModelType = Literal[
     "tts",
     "stt",
     "rerank",
+    "ocr",
     "moderation",
     "vision",
 ]
@@ -78,39 +109,43 @@ GetOnePromptResponseFormat4 = Literal[
 ]
 
 
-GetOnePromptResponseFormatPromptsResponseType = Literal["text",]
+GetOnePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyType = Literal[
+    "text",
+]
 
 
 class GetOnePromptResponseFormat3TypedDict(TypedDict):
-    type: GetOnePromptResponseFormatPromptsResponseType
+    type: GetOnePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
 
 
 class GetOnePromptResponseFormat3(BaseModel):
-    type: GetOnePromptResponseFormatPromptsResponseType
+    type: GetOnePromptResponseFormatPromptsResponse200ApplicationJSONResponseBodyType
 
 
-GetOnePromptResponseFormatPromptsType = Literal["json_object",]
+GetOnePromptResponseFormatPromptsResponse200ApplicationJSONType = Literal[
+    "json_object",
+]
 
 
 class GetOnePromptResponseFormat2TypedDict(TypedDict):
-    type: GetOnePromptResponseFormatPromptsType
+    type: GetOnePromptResponseFormatPromptsResponse200ApplicationJSONType
 
 
 class GetOnePromptResponseFormat2(BaseModel):
-    type: GetOnePromptResponseFormatPromptsType
+    type: GetOnePromptResponseFormatPromptsResponse200ApplicationJSONType
 
 
-GetOnePromptResponseFormatType = Literal["json_schema",]
+GetOnePromptResponseFormatPromptsResponse200Type = Literal["json_schema",]
 
 
-class GetOnePromptResponseFormatJSONSchemaTypedDict(TypedDict):
+class GetOnePromptResponseFormatPromptsResponseJSONSchemaTypedDict(TypedDict):
     name: str
     schema_: Dict[str, Any]
     description: NotRequired[str]
     strict: NotRequired[bool]
 
 
-class GetOnePromptResponseFormatJSONSchema(BaseModel):
+class GetOnePromptResponseFormatPromptsResponseJSONSchema(BaseModel):
     name: str
 
     schema_: Annotated[Dict[str, Any], pydantic.Field(alias="schema")]
@@ -119,23 +154,55 @@ class GetOnePromptResponseFormatJSONSchema(BaseModel):
 
     strict: Optional[bool] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class GetOnePromptResponseFormat1TypedDict(TypedDict):
-    type: GetOnePromptResponseFormatType
-    json_schema: GetOnePromptResponseFormatJSONSchemaTypedDict
+    type: GetOnePromptResponseFormatPromptsResponse200Type
+    json_schema: GetOnePromptResponseFormatPromptsResponseJSONSchemaTypedDict
     display_name: NotRequired[str]
 
 
 class GetOnePromptResponseFormat1(BaseModel):
-    type: GetOnePromptResponseFormatType
+    type: GetOnePromptResponseFormatPromptsResponse200Type
 
-    json_schema: GetOnePromptResponseFormatJSONSchema
+    json_schema: GetOnePromptResponseFormatPromptsResponseJSONSchema
 
     display_name: Optional[str] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["display_name"])
+        serialized = handler(self)
+        m = {}
 
-GetOnePromptResponseFormatTypedDict = TypeAliasType(
-    "GetOnePromptResponseFormatTypedDict",
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+GetOnePromptPromptsResponseFormatTypedDict = TypeAliasType(
+    "GetOnePromptPromptsResponseFormatTypedDict",
     Union[
         GetOnePromptResponseFormat2TypedDict,
         GetOnePromptResponseFormat3TypedDict,
@@ -155,8 +222,8 @@ Important: when using JSON mode, you must also instruct the model to produce JSO
 """
 
 
-GetOnePromptResponseFormat = TypeAliasType(
-    "GetOnePromptResponseFormat",
+GetOnePromptPromptsResponseFormat = TypeAliasType(
+    "GetOnePromptPromptsResponseFormat",
     Union[
         GetOnePromptResponseFormat2,
         GetOnePromptResponseFormat3,
@@ -190,7 +257,7 @@ GetOnePromptEncodingFormat = Literal[
 r"""The format to return the embeddings"""
 
 
-GetOnePromptReasoningEffort = Literal[
+GetOnePromptPromptsReasoningEffort = Literal[
     "none",
     "disable",
     "minimal",
@@ -243,7 +310,7 @@ class GetOnePromptModelParametersTypedDict(TypedDict):
     r"""Only supported on `image` models."""
     style: NotRequired[str]
     r"""Only supported on `image` models."""
-    response_format: NotRequired[Nullable[GetOnePromptResponseFormatTypedDict]]
+    response_format: NotRequired[Nullable[GetOnePromptPromptsResponseFormatTypedDict]]
     r"""An object specifying the format that the model must output.
 
     Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema
@@ -256,7 +323,7 @@ class GetOnePromptModelParametersTypedDict(TypedDict):
     r"""The version of photoReal to use. Must be v1 or v2. Only available for `leonardoai` provider"""
     encoding_format: NotRequired[GetOnePromptEncodingFormat]
     r"""The format to return the embeddings"""
-    reasoning_effort: NotRequired[GetOnePromptReasoningEffort]
+    reasoning_effort: NotRequired[GetOnePromptPromptsReasoningEffort]
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
     budget_tokens: NotRequired[float]
     r"""Gives the model enhanced reasoning capabilities for complex tasks. A value of 0 disables thinking. The minimum budget tokens for thinking are 1024. The Budget Tokens should never exceed the Max Tokens parameter. Only supported by `Anthropic`"""
@@ -312,7 +379,7 @@ class GetOnePromptModelParameters(BaseModel):
     r"""Only supported on `image` models."""
 
     response_format: Annotated[
-        OptionalNullable[GetOnePromptResponseFormat],
+        OptionalNullable[GetOnePromptPromptsResponseFormat],
         pydantic.Field(alias="responseFormat"),
     ] = UNSET
     r"""An object specifying the format that the model must output.
@@ -333,7 +400,8 @@ class GetOnePromptModelParameters(BaseModel):
     r"""The format to return the embeddings"""
 
     reasoning_effort: Annotated[
-        Optional[GetOnePromptReasoningEffort], pydantic.Field(alias="reasoningEffort")
+        Optional[GetOnePromptPromptsReasoningEffort],
+        pydantic.Field(alias="reasoningEffort"),
     ] = None
     r"""Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."""
 
@@ -352,51 +420,48 @@
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "temperature",
-            "maxTokens",
-            "topK",
-            "topP",
-            "frequencyPenalty",
-            "presencePenalty",
-            "numImages",
-            "seed",
-            "format",
-            "dimensions",
-            "quality",
-            "style",
-            "responseFormat",
-            "photoRealVersion",
-            "encoding_format",
-            "reasoningEffort",
-            "budgetTokens",
-            "verbosity",
-            "thinkingLevel",
-        ]
-        nullable_fields = ["responseFormat"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "temperature",
+                "maxTokens",
+                "topK",
+                "topP",
+                "frequencyPenalty",
+                "presencePenalty",
+                "numImages",
+                "seed",
+                "format",
+                "dimensions",
+                "quality",
+                "style",
+                "responseFormat",
+                "photoRealVersion",
+                "encoding_format",
+                "reasoningEffort",
+                "budgetTokens",
+                "verbosity",
+                "thinkingLevel",
+            ]
+        )
+        nullable_fields = set(["responseFormat"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
@@ -472,6 +537,22 @@ class GetOnePrompt2File(BaseModel):
     filename: Optional[str] = None
     r"""The name of the file, used when passing the file to the model as a string."""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["file_data", "uri", "mimeType", "filename"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class GetOnePrompt23TypedDict(TypedDict):
     type: GetOnePrompt2PromptsResponseType
@@ -508,6 +589,22 @@ class GetOnePrompt2ImageURL(BaseModel):
     detail: Optional[str] = None
     r"""Specifies the detail level of the image. Currently only supported with OpenAI models"""
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "detail"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class GetOnePrompt22TypedDict(TypedDict):
     r"""The image part of the prompt message. Only supported with vision models."""
@@ -570,7 +667,7 @@ GetOnePromptContent = TypeAliasType(
 r"""The contents of the user message. Either the text content of the message or an array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Can be null for tool messages in certain scenarios."""
 
 
-GetOnePromptPromptsType = Literal["function",]
+GetOnePromptPromptsResponseType = Literal["function",]
 
 
 class GetOnePromptFunctionTypedDict(TypedDict):
@@ -587,14 +684,14 @@ class GetOnePromptFunction(BaseModel):
 
 
 class GetOnePromptToolCallsTypedDict(TypedDict):
-    type: GetOnePromptPromptsType
+    type: GetOnePromptPromptsResponseType
     function: GetOnePromptFunctionTypedDict
     id: NotRequired[str]
     index: NotRequired[float]
 
 
 class GetOnePromptToolCalls(BaseModel):
-    type: GetOnePromptPromptsType
+    type: GetOnePromptPromptsResponseType
 
     function: GetOnePromptFunction
 
@@ -602,6 +699,22 @@ class GetOnePromptToolCalls(BaseModel):
 
     index: Optional[float] = None
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["id", "index"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
 
 class GetOnePromptMessagesTypedDict(TypedDict):
     role: GetOnePromptRole
@@ -625,61 +738,62 @@ class GetOnePromptMessages(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["tool_calls", "tool_call_id"]
-        nullable_fields = ["content", "tool_call_id"]
-        null_default_fields = []
-
+        optional_fields = set(["tool_calls", "tool_call_id"])
+        nullable_fields = set(["content", "tool_call_id"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
 
 
+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class GetOnePromptPromptConfigTypedDict(TypedDict):
-    r"""A list of messages compatible with the openAI schema"""
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
 
     messages: List[GetOnePromptMessagesTypedDict]
     stream: NotRequired[bool]
-    model: NotRequired[str]
+    model: NotRequired[Nullable[str]]
     model_db_id: NotRequired[Nullable[str]]
     r"""The id of the resource"""
    model_type: NotRequired[Nullable[GetOnePromptModelType]]
     r"""The modality of the model"""
     model_parameters: NotRequired[GetOnePromptModelParametersTypedDict]
     r"""Model Parameters: Not all parameters apply to every model"""
-    provider: NotRequired[GetOnePromptProvider]
+    provider: NotRequired[Nullable[GetOnePromptProvider]]
     integration_id: NotRequired[Nullable[str]]
     r"""The ID of the integration to use"""
     version: NotRequired[str]
 
 
+@deprecated(
+    "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+)
 class GetOnePromptPromptConfig(BaseModel):
-    r"""A list of messages compatible with the openAI schema"""
+    r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
 
     messages: List[GetOnePromptMessages]
 
     stream: Optional[bool] = None
 
-    model: Optional[str] = None
+    model: OptionalNullable[str] = UNSET
 
     model_db_id: OptionalNullable[str] = UNSET
     r"""The id of the resource"""
@@ -690,7 +804,7 @@ class GetOnePromptPromptConfig(BaseModel):
     model_parameters: Optional[GetOnePromptModelParameters] = None
     r"""Model Parameters: Not all parameters apply to every model"""
 
-    provider: Optional[GetOnePromptProvider] = None
+    provider: OptionalNullable[GetOnePromptProvider] = UNSET
 
     integration_id: OptionalNullable[str] = UNSET
     r"""The ID of the integration to use"""
@@ -699,201 +813,1530 @@ class GetOnePromptPromptConfig(BaseModel):
699
813
 
700
814
  @model_serializer(mode="wrap")
701
815
  def serialize_model(self, handler):
702
- optional_fields = [
703
- "stream",
704
- "model",
705
- "model_db_id",
706
- "model_type",
707
- "model_parameters",
708
- "provider",
709
- "integration_id",
710
- "version",
711
- ]
712
- nullable_fields = ["model_db_id", "model_type", "integration_id"]
713
- null_default_fields = []
714
-
816
+ optional_fields = set(
817
+ [
818
+ "stream",
819
+ "model",
820
+ "model_db_id",
821
+ "model_type",
822
+ "model_parameters",
823
+ "provider",
824
+ "integration_id",
825
+ "version",
826
+ ]
827
+ )
828
+ nullable_fields = set(
829
+ ["model", "model_db_id", "model_type", "provider", "integration_id"]
830
+ )
715
831
  serialized = handler(self)
716
-
717
832
  m = {}
718
833
 
719
834
  for n, f in type(self).model_fields.items():
720
835
  k = f.alias or n
721
836
  val = serialized.get(k)
722
- serialized.pop(k, None)
723
-
724
- optional_nullable = k in optional_fields and k in nullable_fields
725
- is_set = (
726
- self.__pydantic_fields_set__.intersection({n})
727
- or k in null_default_fields
728
- ) # pylint: disable=no-member
729
-
730
- if val is not None and val != UNSET_SENTINEL:
731
- m[k] = val
732
- elif val != UNSET_SENTINEL and (
733
- not k in optional_fields or (optional_nullable and is_set)
734
- ):
735
- m[k] = val
837
+ is_nullable_and_explicitly_set = (
838
+ k in nullable_fields
839
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
840
+ )
841
+
842
+ if val != UNSET_SENTINEL:
843
+ if (
844
+ val is not None
845
+ or k not in optional_fields
846
+ or is_nullable_and_explicitly_set
847
+ ):
848
+ m[k] = val
736
849
 
737
850
  return m
738
851
 
739
852
 
740
- GetOnePromptUseCases = Literal[
741
- "Agents simulations",
742
- "Agents",
743
- "API interaction",
744
- "Autonomous Agents",
745
- "Chatbots",
746
- "Classification",
747
- "Code understanding",
748
- "Code writing",
749
- "Conversation",
750
- "Documents QA",
751
- "Evaluation",
752
- "Extraction",
753
- "Multi-modal",
754
- "Self-checking",
755
- "Sentiment analysis",
756
- "SQL",
757
- "Summarization",
758
- "Tagging",
759
- "Translation (document)",
760
- "Translation (sentences)",
853
+ GetOnePromptVoice = Literal[
854
+ "alloy",
855
+ "echo",
856
+ "fable",
857
+ "onyx",
858
+ "nova",
859
+ "shimmer",
761
860
  ]
861
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
762
862
 
763
863
 
764
- GetOnePromptLanguage = Literal[
765
- "Chinese",
766
- "Dutch",
767
- "English",
768
- "French",
769
- "German",
770
- "Russian",
771
- "Spanish",
864
+ GetOnePromptPromptsFormat = Literal[
865
+ "wav",
866
+ "mp3",
867
+ "flac",
868
+ "opus",
869
+ "pcm16",
772
870
  ]
773
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
871
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
774
872
 
775
873
 
776
- class GetOnePromptMetadataTypedDict(TypedDict):
777
- use_cases: NotRequired[List[GetOnePromptUseCases]]
778
- r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
779
- language: NotRequired[Nullable[GetOnePromptLanguage]]
780
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
874
+ class GetOnePromptAudioTypedDict(TypedDict):
875
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
781
876
 
877
+ voice: GetOnePromptVoice
878
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
879
+ format_: GetOnePromptPromptsFormat
880
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
782
881
 
783
- class GetOnePromptMetadata(BaseModel):
784
- use_cases: Optional[List[GetOnePromptUseCases]] = None
785
- r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
786
882
 
787
- language: OptionalNullable[GetOnePromptLanguage] = UNSET
788
- r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
883
+ class GetOnePromptAudio(BaseModel):
884
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
885
+
886
+ voice: GetOnePromptVoice
887
+ r"""The voice the model uses to respond. Supported voices are alloy, echo, fable, onyx, nova, and shimmer."""
888
+
889
+ format_: Annotated[GetOnePromptPromptsFormat, pydantic.Field(alias="format")]
890
+ r"""Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16."""
891
+
892
+
893
+ GetOnePromptResponseFormatPromptsResponseType = Literal["json_schema",]
894
+
895
+
896
+ class GetOnePromptResponseFormatJSONSchemaTypedDict(TypedDict):
897
+ name: str
898
+ r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
899
+ description: NotRequired[str]
900
+ r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
901
+ schema_: NotRequired[Any]
902
+ r"""The schema for the response format, described as a JSON Schema object."""
903
+ strict: NotRequired[bool]
904
+ r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
905
+
906
+
907
+ class GetOnePromptResponseFormatJSONSchema(BaseModel):
908
+ name: str
909
+ r"""The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."""
910
+
911
+ description: Optional[str] = None
912
+ r"""A description of what the response format is for, used by the model to determine how to respond in the format."""
913
+
914
+ schema_: Annotated[Optional[Any], pydantic.Field(alias="schema")] = None
915
+ r"""The schema for the response format, described as a JSON Schema object."""
916
+
917
+ strict: Optional[bool] = False
918
+ r"""Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true."""
789
919
 
790
920
  @model_serializer(mode="wrap")
791
921
  def serialize_model(self, handler):
792
- optional_fields = ["use_cases", "language"]
793
- nullable_fields = ["language"]
794
- null_default_fields = []
795
-
922
+ optional_fields = set(["description", "schema", "strict"])
796
923
  serialized = handler(self)
797
-
798
924
  m = {}
799
925
 
800
926
  for n, f in type(self).model_fields.items():
801
927
  k = f.alias or n
802
928
  val = serialized.get(k)
803
- serialized.pop(k, None)
804
-
805
- optional_nullable = k in optional_fields and k in nullable_fields
806
- is_set = (
807
- self.__pydantic_fields_set__.intersection({n})
808
- or k in null_default_fields
809
- ) # pylint: disable=no-member
810
929
 
811
- if val is not None and val != UNSET_SENTINEL:
812
- m[k] = val
813
- elif val != UNSET_SENTINEL and (
814
- not k in optional_fields or (optional_nullable and is_set)
815
- ):
816
- m[k] = val
930
+ if val != UNSET_SENTINEL:
931
+ if val is not None or k not in optional_fields:
932
+ m[k] = val
817
933
 
818
934
  return m
819
935
 
820
936
 
821
- class GetOnePromptPromptTypedDict(TypedDict):
822
- r"""A prompt entity with configuration, metadata, and versioning."""
937
+ class GetOnePromptResponseFormatPromptsJSONSchemaTypedDict(TypedDict):
938
+ r"""
823
939
 
824
- id: str
825
- type: GetOnePromptType
826
- owner: str
827
- domain_id: str
828
- created: str
829
- updated: str
830
- display_name: str
831
- r"""The prompt’s name, meant to be displayable in the UI."""
832
- prompt_config: GetOnePromptPromptConfigTypedDict
833
- r"""A list of messages compatible with the openAI schema"""
834
- created_by_id: NotRequired[Nullable[str]]
835
- updated_by_id: NotRequired[Nullable[str]]
836
- description: NotRequired[Nullable[str]]
837
- r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
838
- metadata: NotRequired[GetOnePromptMetadataTypedDict]
940
+ JSON Schema response format. Used to generate structured JSON responses
941
+ """
839
942
 
943
+ type: GetOnePromptResponseFormatPromptsResponseType
944
+ json_schema: GetOnePromptResponseFormatJSONSchemaTypedDict
840
945
 
841
- class GetOnePromptPrompt(BaseModel):
842
- r"""A prompt entity with configuration, metadata, and versioning."""
843
946
 
844
- id: Annotated[str, pydantic.Field(alias="_id")]
947
+ class GetOnePromptResponseFormatPromptsJSONSchema(BaseModel):
948
+ r"""
845
949
 
846
- type: GetOnePromptType
950
+ JSON Schema response format. Used to generate structured JSON responses
951
+ """
847
952
 
848
- owner: str
953
+ type: GetOnePromptResponseFormatPromptsResponseType
849
954
 
850
- domain_id: str
955
+ json_schema: GetOnePromptResponseFormatJSONSchema
851
956
 
852
- created: str
853
957
 
854
- updated: str
958
+ GetOnePromptResponseFormatPromptsType = Literal["json_object",]
855
959
 
856
- display_name: str
857
- r"""The prompt’s name, meant to be displayable in the UI."""
858
960
 
859
- prompt_config: GetOnePromptPromptConfig
860
- r"""A list of messages compatible with the openAI schema"""
961
+ class GetOnePromptResponseFormatJSONObjectTypedDict(TypedDict):
962
+ r"""
861
963
 
862
- created_by_id: OptionalNullable[str] = UNSET
964
+ JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
965
+ """
863
966
 
864
- updated_by_id: OptionalNullable[str] = UNSET
967
+ type: GetOnePromptResponseFormatPromptsType
865
968
 
866
- description: OptionalNullable[str] = UNSET
867
- r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
868
969
 
869
- metadata: Optional[GetOnePromptMetadata] = None
970
+ class GetOnePromptResponseFormatJSONObject(BaseModel):
971
+ r"""
870
972
 
871
- @model_serializer(mode="wrap")
872
- def serialize_model(self, handler):
873
- optional_fields = ["created_by_id", "updated_by_id", "description", "metadata"]
874
- nullable_fields = ["created_by_id", "updated_by_id", "description"]
875
- null_default_fields = []
973
+ JSON object response format. An older method of generating JSON responses. Using `json_schema` is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.
974
+ """
876
975
 
877
- serialized = handler(self)
976
+ type: GetOnePromptResponseFormatPromptsType
977
+
978
+
979
+ GetOnePromptResponseFormatType = Literal["text",]
980
+
981
+
982
+ class GetOnePromptResponseFormatTextTypedDict(TypedDict):
983
+ r"""
984
+
985
+ Default response format. Used to generate text responses
986
+ """
987
+
988
+ type: GetOnePromptResponseFormatType
989
+
990
+
991
+ class GetOnePromptResponseFormatText(BaseModel):
992
+ r"""
993
+
994
+ Default response format. Used to generate text responses
995
+ """
996
+
997
+ type: GetOnePromptResponseFormatType
998
+
999
+
1000
+ GetOnePromptResponseFormatTypedDict = TypeAliasType(
1001
+ "GetOnePromptResponseFormatTypedDict",
1002
+ Union[
1003
+ GetOnePromptResponseFormatTextTypedDict,
1004
+ GetOnePromptResponseFormatJSONObjectTypedDict,
1005
+ GetOnePromptResponseFormatPromptsJSONSchemaTypedDict,
1006
+ ],
1007
+ )
1008
+ r"""An object specifying the format that the model must output"""
1009
+
1010
+
1011
+ GetOnePromptResponseFormat = Annotated[
1012
+ Union[
1013
+ Annotated[GetOnePromptResponseFormatText, Tag("text")],
1014
+ Annotated[GetOnePromptResponseFormatJSONObject, Tag("json_object")],
1015
+ Annotated[GetOnePromptResponseFormatPromptsJSONSchema, Tag("json_schema")],
1016
+ ],
1017
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1018
+ ]
1019
+ r"""An object specifying the format that the model must output"""
1020
+
1021
+
1022
+ GetOnePromptReasoningEffort = Literal[
1023
+ "none",
1024
+ "minimal",
1025
+ "low",
1026
+ "medium",
1027
+ "high",
1028
+ "xhigh",
1029
+ ]
1030
+ r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
1031
+
1032
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
1033
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
1034
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1035
+ - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
1036
+
1037
+ Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
1038
+ """
1039
+
1040
+
1041
+ GetOnePromptStopTypedDict = TypeAliasType(
1042
+ "GetOnePromptStopTypedDict", Union[str, List[str]]
1043
+ )
1044
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
1045
+
1046
+
1047
+ GetOnePromptStop = TypeAliasType("GetOnePromptStop", Union[str, List[str]])
1048
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
1049
+
1050
+
1051
+ class GetOnePromptStreamOptionsTypedDict(TypedDict):
1052
+ r"""Options for streaming response. Only set this when you set stream: true."""
878
1053
 
1054
+ include_usage: NotRequired[bool]
1055
+ r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
1056
+
1057
+
1058
+ class GetOnePromptStreamOptions(BaseModel):
1059
+ r"""Options for streaming response. Only set this when you set stream: true."""
1060
+
1061
+ include_usage: Optional[bool] = None
1062
+ r"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value."""
1063
+
1064
+ @model_serializer(mode="wrap")
1065
+ def serialize_model(self, handler):
1066
+ optional_fields = set(["include_usage"])
1067
+ serialized = handler(self)
1068
+ m = {}
1069
+
1070
+ for n, f in type(self).model_fields.items():
1071
+ k = f.alias or n
1072
+ val = serialized.get(k)
1073
+
1074
+ if val != UNSET_SENTINEL:
1075
+ if val is not None or k not in optional_fields:
1076
+ m[k] = val
1077
+
1078
+ return m
1079
+
1080
+
1081
+ GetOnePromptThinkingTypedDict = TypeAliasType(
1082
+ "GetOnePromptThinkingTypedDict",
1083
+ Union[ThinkingConfigDisabledSchemaTypedDict, ThinkingConfigEnabledSchemaTypedDict],
1084
+ )
1085
+
1086
+
1087
+ GetOnePromptThinking = Annotated[
1088
+ Union[
1089
+ Annotated[ThinkingConfigDisabledSchema, Tag("disabled")],
1090
+ Annotated[ThinkingConfigEnabledSchema, Tag("enabled")],
1091
+ ],
1092
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1093
+ ]
1094
+
1095
+
1096
+ GetOnePromptToolChoiceType = Literal["function",]
1097
+ r"""The type of the tool. Currently, only function is supported."""
1098
+
1099
+
1100
+ class GetOnePromptToolChoiceFunctionTypedDict(TypedDict):
1101
+ name: str
1102
+ r"""The name of the function to call."""
1103
+
1104
+
1105
+ class GetOnePromptToolChoiceFunction(BaseModel):
1106
+ name: str
1107
+ r"""The name of the function to call."""
1108
+
1109
+
1110
+ class GetOnePromptToolChoice2TypedDict(TypedDict):
1111
+ function: GetOnePromptToolChoiceFunctionTypedDict
1112
+ type: NotRequired[GetOnePromptToolChoiceType]
1113
+ r"""The type of the tool. Currently, only function is supported."""
1114
+
1115
+
1116
+ class GetOnePromptToolChoice2(BaseModel):
1117
+ function: GetOnePromptToolChoiceFunction
1118
+
1119
+ type: Optional[GetOnePromptToolChoiceType] = None
1120
+ r"""The type of the tool. Currently, only function is supported."""
1121
+
1122
+ @model_serializer(mode="wrap")
1123
+ def serialize_model(self, handler):
1124
+ optional_fields = set(["type"])
1125
+ serialized = handler(self)
1126
+ m = {}
1127
+
1128
+ for n, f in type(self).model_fields.items():
1129
+ k = f.alias or n
1130
+ val = serialized.get(k)
1131
+
1132
+ if val != UNSET_SENTINEL:
1133
+ if val is not None or k not in optional_fields:
1134
+ m[k] = val
1135
+
1136
+ return m
1137
+
1138
+
1139
+ GetOnePromptToolChoice1 = Literal[
1140
+ "none",
1141
+ "auto",
1142
+ "required",
1143
+ ]
1144
+
1145
+
1146
+ GetOnePromptToolChoiceTypedDict = TypeAliasType(
1147
+ "GetOnePromptToolChoiceTypedDict",
1148
+ Union[GetOnePromptToolChoice2TypedDict, GetOnePromptToolChoice1],
1149
+ )
1150
+ r"""Controls which (if any) tool is called by the model."""
1151
+
1152
+
1153
+ GetOnePromptToolChoice = TypeAliasType(
1154
+ "GetOnePromptToolChoice", Union[GetOnePromptToolChoice2, GetOnePromptToolChoice1]
1155
+ )
1156
+ r"""Controls which (if any) tool is called by the model."""
1157
+
1158
+
1159
+ GetOnePromptModalities = Literal[
1160
+ "text",
1161
+ "audio",
1162
+ ]
1163
+
1164
+
1165
+ GetOnePromptID1 = Literal[
1166
+ "orq_pii_detection",
1167
+ "orq_sexual_moderation",
1168
+ "orq_harmful_moderation",
1169
+ ]
1170
+ r"""The key of the guardrail."""
1171
+
1172
+
1173
+ GetOnePromptIDTypedDict = TypeAliasType(
1174
+ "GetOnePromptIDTypedDict", Union[GetOnePromptID1, str]
1175
+ )
1176
+
1177
+
1178
+ GetOnePromptID = TypeAliasType("GetOnePromptID", Union[GetOnePromptID1, str])
1179
+
1180
+
1181
+ GetOnePromptExecuteOn = Literal[
1182
+ "input",
1183
+ "output",
1184
+ ]
1185
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1186
+
1187
+
1188
+ class GetOnePromptGuardrailsTypedDict(TypedDict):
1189
+ id: GetOnePromptIDTypedDict
1190
+ execute_on: GetOnePromptExecuteOn
1191
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1192
+
1193
+
1194
+ class GetOnePromptGuardrails(BaseModel):
1195
+ id: GetOnePromptID
1196
+
1197
+ execute_on: GetOnePromptExecuteOn
1198
+ r"""Determines whether the guardrail runs on the input (user message) or output (model response)."""
1199
+
1200
+
1201
+ class GetOnePromptFallbacksTypedDict(TypedDict):
1202
+ model: str
1203
+ r"""Fallback model identifier"""
1204
+
1205
+
1206
+ class GetOnePromptFallbacks(BaseModel):
1207
+ model: str
1208
+ r"""Fallback model identifier"""
1209
+
1210
+
1211
+ class GetOnePromptRetryTypedDict(TypedDict):
1212
+ r"""Retry configuration for the request"""
1213
+
1214
+ count: NotRequired[float]
1215
+ r"""Number of retry attempts (1-5)"""
1216
+ on_codes: NotRequired[List[float]]
1217
+ r"""HTTP status codes that trigger retry logic"""
1218
+
1219
+
1220
+ class GetOnePromptRetry(BaseModel):
1221
+ r"""Retry configuration for the request"""
1222
+
1223
+ count: Optional[float] = 3
1224
+ r"""Number of retry attempts (1-5)"""
1225
+
1226
+ on_codes: Optional[List[float]] = None
1227
+ r"""HTTP status codes that trigger retry logic"""
1228
+
1229
+ @model_serializer(mode="wrap")
1230
+ def serialize_model(self, handler):
1231
+ optional_fields = set(["count", "on_codes"])
1232
+ serialized = handler(self)
1233
+ m = {}
1234
+
1235
+ for n, f in type(self).model_fields.items():
1236
+ k = f.alias or n
1237
+ val = serialized.get(k)
1238
+
1239
+ if val != UNSET_SENTINEL:
1240
+ if val is not None or k not in optional_fields:
1241
+ m[k] = val
1242
+
1243
+ return m
1244
+
1245
+
1246
+ GetOnePromptPromptsType = Literal["exact_match",]
1247
+
1248
+
1249
+ class GetOnePromptCacheTypedDict(TypedDict):
1250
+ r"""Cache configuration for the request."""
1251
+
1252
+ type: GetOnePromptPromptsType
1253
+ ttl: NotRequired[float]
1254
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
1255
+
1256
+
1257
+ class GetOnePromptCache(BaseModel):
1258
+ r"""Cache configuration for the request."""
1259
+
1260
+ type: GetOnePromptPromptsType
1261
+
1262
+ ttl: Optional[float] = 1800
1263
+ r"""Time to live for cached responses in seconds. Maximum 259200 seconds (3 days)."""
1264
+
1265
+ @model_serializer(mode="wrap")
1266
+ def serialize_model(self, handler):
1267
+ optional_fields = set(["ttl"])
1268
+ serialized = handler(self)
1269
+ m = {}
1270
+
1271
+ for n, f in type(self).model_fields.items():
1272
+ k = f.alias or n
1273
+ val = serialized.get(k)
1274
+
1275
+ if val != UNSET_SENTINEL:
1276
+ if val is not None or k not in optional_fields:
1277
+ m[k] = val
1278
+
1279
+ return m
1280
+
1281
+
1282
+ GetOnePromptLoadBalancerType = Literal["weight_based",]
1283
+
1284
+
1285
+ class GetOnePromptLoadBalancerModelsTypedDict(TypedDict):
1286
+ model: str
1287
+ r"""Model identifier for load balancing"""
1288
+ weight: NotRequired[float]
1289
+ r"""Weight assigned to this model for load balancing"""
1290
+
1291
+
1292
+ class GetOnePromptLoadBalancerModels(BaseModel):
1293
+ model: str
1294
+ r"""Model identifier for load balancing"""
1295
+
1296
+ weight: Optional[float] = 0.5
1297
+ r"""Weight assigned to this model for load balancing"""
1298
+
1299
+ @model_serializer(mode="wrap")
1300
+ def serialize_model(self, handler):
1301
+ optional_fields = set(["weight"])
1302
+ serialized = handler(self)
1303
+ m = {}
1304
+
1305
+ for n, f in type(self).model_fields.items():
1306
+ k = f.alias or n
1307
+ val = serialized.get(k)
1308
+
1309
+ if val != UNSET_SENTINEL:
1310
+ if val is not None or k not in optional_fields:
1311
+ m[k] = val
1312
+
1313
+ return m
1314
+
1315
+
1316
+ class GetOnePromptLoadBalancer1TypedDict(TypedDict):
1317
+ type: GetOnePromptLoadBalancerType
1318
+ models: List[GetOnePromptLoadBalancerModelsTypedDict]
1319
+
1320
+
1321
+ class GetOnePromptLoadBalancer1(BaseModel):
1322
+ type: GetOnePromptLoadBalancerType
1323
+
1324
+ models: List[GetOnePromptLoadBalancerModels]
1325
+
1326
+
1327
+ GetOnePromptLoadBalancerTypedDict = GetOnePromptLoadBalancer1TypedDict
1328
+ r"""Load balancer configuration for the request."""
1329
+
1330
+
1331
+ GetOnePromptLoadBalancer = GetOnePromptLoadBalancer1
1332
+ r"""Load balancer configuration for the request."""
1333
+
1334
+
1335
+ class GetOnePromptTimeoutTypedDict(TypedDict):
1336
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1337
+
1338
+ call_timeout: float
1339
+ r"""Timeout value in milliseconds"""
1340
+
1341
+
1342
+ class GetOnePromptTimeout(BaseModel):
1343
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1344
+
1345
+ call_timeout: float
1346
+ r"""Timeout value in milliseconds"""
1347
+
1348
+
1349
+ GetOnePromptMessagesPromptsResponse200Role = Literal["tool",]
1350
+ r"""The role of the messages author, in this case tool."""
1351
+
1352
+
1353
+ GetOnePromptContentPromptsResponse2002TypedDict = TextContentPartSchemaTypedDict
1354
+
1355
+
1356
+ GetOnePromptContentPromptsResponse2002 = TextContentPartSchema
1357
+
1358
+
1359
+ GetOnePromptMessagesPromptsResponse200ContentTypedDict = TypeAliasType(
1360
+ "GetOnePromptMessagesPromptsResponse200ContentTypedDict",
1361
+ Union[str, List[GetOnePromptContentPromptsResponse2002TypedDict]],
1362
+ )
1363
+ r"""The contents of the tool message."""
1364
+
1365
+
1366
+ GetOnePromptMessagesPromptsResponse200Content = TypeAliasType(
1367
+ "GetOnePromptMessagesPromptsResponse200Content",
1368
+ Union[str, List[GetOnePromptContentPromptsResponse2002]],
1369
+ )
1370
+ r"""The contents of the tool message."""
1371
+
1372
+
1373
+ GetOnePromptMessagesPromptsType = Literal["ephemeral",]
1374
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1375
+
1376
+
1377
+ GetOnePromptMessagesTTL = Literal[
1378
+ "5m",
1379
+ "1h",
1380
+ ]
1381
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1382
+
1383
+ - `5m`: 5 minutes
1384
+ - `1h`: 1 hour
1385
+
1386
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1387
+ """
1388
+
1389
+
1390
+ class GetOnePromptMessagesCacheControlTypedDict(TypedDict):
1391
+ type: GetOnePromptMessagesPromptsType
1392
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1393
+ ttl: NotRequired[GetOnePromptMessagesTTL]
1394
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1395
+
1396
+ - `5m`: 5 minutes
1397
+ - `1h`: 1 hour
1398
+
1399
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1400
+ """
1401
+
1402
+
1403
+ class GetOnePromptMessagesCacheControl(BaseModel):
1404
+ type: GetOnePromptMessagesPromptsType
1405
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1406
+
1407
+ ttl: Optional[GetOnePromptMessagesTTL] = "5m"
1408
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1409
+
1410
+ - `5m`: 5 minutes
1411
+ - `1h`: 1 hour
1412
+
1413
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1414
+ """
1415
+
1416
+ @model_serializer(mode="wrap")
1417
+ def serialize_model(self, handler):
1418
+ optional_fields = set(["ttl"])
1419
+ serialized = handler(self)
1420
+ m = {}
1421
+
1422
+ for n, f in type(self).model_fields.items():
1423
+ k = f.alias or n
1424
+ val = serialized.get(k)
1425
+
1426
+ if val != UNSET_SENTINEL:
1427
+ if val is not None or k not in optional_fields:
1428
+ m[k] = val
1429
+
1430
+ return m
1431
+
1432
+
1433
+ class GetOnePromptMessagesToolMessageTypedDict(TypedDict):
1434
+ role: GetOnePromptMessagesPromptsResponse200Role
1435
+ r"""The role of the messages author, in this case tool."""
1436
+ content: GetOnePromptMessagesPromptsResponse200ContentTypedDict
1437
+ r"""The contents of the tool message."""
1438
+ tool_call_id: Nullable[str]
1439
+ r"""Tool call that this message is responding to."""
1440
+ cache_control: NotRequired[GetOnePromptMessagesCacheControlTypedDict]
1441
+
1442
+
1443
+ class GetOnePromptMessagesToolMessage(BaseModel):
1444
+ role: GetOnePromptMessagesPromptsResponse200Role
1445
+ r"""The role of the messages author, in this case tool."""
1446
+
1447
+ content: GetOnePromptMessagesPromptsResponse200Content
1448
+ r"""The contents of the tool message."""
1449
+
1450
+ tool_call_id: Nullable[str]
1451
+ r"""Tool call that this message is responding to."""
1452
+
1453
+ cache_control: Optional[GetOnePromptMessagesCacheControl] = None
1454
+
1455
+ @model_serializer(mode="wrap")
1456
+ def serialize_model(self, handler):
1457
+ optional_fields = set(["cache_control"])
1458
+ nullable_fields = set(["tool_call_id"])
1459
+ serialized = handler(self)
1460
+ m = {}
1461
+
1462
+ for n, f in type(self).model_fields.items():
1463
+ k = f.alias or n
1464
+ val = serialized.get(k)
1465
+ is_nullable_and_explicitly_set = (
1466
+ k in nullable_fields
1467
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
1468
+ )
1469
+
1470
+ if val != UNSET_SENTINEL:
1471
+ if (
1472
+ val is not None
1473
+ or k not in optional_fields
1474
+ or is_nullable_and_explicitly_set
1475
+ ):
1476
+ m[k] = val
1477
+
1478
+ return m
1479
+
1480
+
1481
+ GetOnePromptContentPromptsResponse2TypedDict = TypeAliasType(
1482
+ "GetOnePromptContentPromptsResponse2TypedDict",
1483
+ Union[
1484
+ RefusalPartSchemaTypedDict,
1485
+ RedactedReasoningPartSchemaTypedDict,
1486
+ TextContentPartSchemaTypedDict,
1487
+ ReasoningPartSchemaTypedDict,
1488
+ ],
1489
+ )
1490
+
1491
+
1492
+ GetOnePromptContentPromptsResponse2 = Annotated[
1493
+ Union[
1494
+ Annotated[TextContentPartSchema, Tag("text")],
1495
+ Annotated[RefusalPartSchema, Tag("refusal")],
1496
+ Annotated[ReasoningPartSchema, Tag("reasoning")],
1497
+ Annotated[RedactedReasoningPartSchema, Tag("redacted_reasoning")],
1498
+ ],
1499
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1500
+ ]
1501
+
1502
+
1503
+ GetOnePromptMessagesPromptsResponseContentTypedDict = TypeAliasType(
1504
+ "GetOnePromptMessagesPromptsResponseContentTypedDict",
1505
+ Union[str, List[GetOnePromptContentPromptsResponse2TypedDict]],
1506
+ )
1507
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
1508
+
1509
+
1510
+ GetOnePromptMessagesPromptsResponseContent = TypeAliasType(
1511
+ "GetOnePromptMessagesPromptsResponseContent",
1512
+ Union[str, List[GetOnePromptContentPromptsResponse2]],
1513
+ )
1514
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
1515
+
1516
+
1517
+ GetOnePromptMessagesPromptsResponseRole = Literal["assistant",]
1518
+ r"""The role of the messages author, in this case `assistant`."""
1519
+
1520
+
1521
+ class GetOnePromptMessagesAudioTypedDict(TypedDict):
1522
+ r"""Data about a previous audio response from the model."""
1523
+
1524
+ id: str
1525
+ r"""Unique identifier for a previous audio response from the model."""
1526
+
1527
+
1528
+ class GetOnePromptMessagesAudio(BaseModel):
1529
+ r"""Data about a previous audio response from the model."""
1530
+
1531
+ id: str
1532
+ r"""Unique identifier for a previous audio response from the model."""
1533
+
1534
+
1535
+ GetOnePromptMessagesType = Literal["function",]
1536
+ r"""The type of the tool. Currently, only `function` is supported."""
1537
+
1538
+
1539
+ class GetOnePromptMessagesFunctionTypedDict(TypedDict):
1540
+ name: NotRequired[str]
1541
+ r"""The name of the function to call."""
1542
+ arguments: NotRequired[str]
1543
+ r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
1544
+
1545
+
1546
+ class GetOnePromptMessagesFunction(BaseModel):
1547
+ name: Optional[str] = None
1548
+ r"""The name of the function to call."""
1549
+
1550
+ arguments: Optional[str] = None
1551
+ r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."""
1552
+
1553
+ @model_serializer(mode="wrap")
1554
+ def serialize_model(self, handler):
1555
+ optional_fields = set(["name", "arguments"])
1556
+ serialized = handler(self)
1557
+ m = {}
1558
+
1559
+ for n, f in type(self).model_fields.items():
1560
+ k = f.alias or n
1561
+ val = serialized.get(k)
1562
+
1563
+ if val != UNSET_SENTINEL:
1564
+ if val is not None or k not in optional_fields:
1565
+ m[k] = val
1566
+
1567
+ return m
1568
+
1569
+
1570
+ class GetOnePromptMessagesToolCallsTypedDict(TypedDict):
1571
+ id: str
1572
+ r"""The ID of the tool call."""
1573
+ type: GetOnePromptMessagesType
1574
+ r"""The type of the tool. Currently, only `function` is supported."""
1575
+ function: GetOnePromptMessagesFunctionTypedDict
1576
+ thought_signature: NotRequired[str]
1577
+ r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
1578
+
1579
+
1580
+ class GetOnePromptMessagesToolCalls(BaseModel):
1581
+ id: str
1582
+ r"""The ID of the tool call."""
1583
+
1584
+ type: GetOnePromptMessagesType
1585
+ r"""The type of the tool. Currently, only `function` is supported."""
1586
+
1587
+ function: GetOnePromptMessagesFunction
1588
+
1589
+ thought_signature: Optional[str] = None
1590
+ r"""Encrypted representation of the model internal reasoning state during function calling. Required by Gemini 3 models when continuing a conversation after a tool call."""
1591
+
1592
+ @model_serializer(mode="wrap")
1593
+ def serialize_model(self, handler):
1594
+ optional_fields = set(["thought_signature"])
1595
+ serialized = handler(self)
1596
+ m = {}
1597
+
1598
+ for n, f in type(self).model_fields.items():
1599
+ k = f.alias or n
1600
+ val = serialized.get(k)
1601
+
1602
+ if val != UNSET_SENTINEL:
1603
+ if val is not None or k not in optional_fields:
1604
+ m[k] = val
1605
+
1606
+ return m
1607
+
1608
+
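Editor's note, a sketch of a tool call as the models above represent it; the function arguments are a JSON string, so they should be parsed and validated before anything is invoked. Names and values here are hypothetical.

import json

call = GetOnePromptMessagesToolCalls(
    id="call_001",  # hypothetical call ID
    type="function",
    function=GetOnePromptMessagesFunction(
        name="get_weather",
        arguments='{"city": "Amsterdam"}',
    ),
)
args = json.loads(call.function.arguments or "{}")  # validate before dispatching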
1609
+ class GetOnePromptMessagesAssistantMessageTypedDict(TypedDict):
1610
+ role: GetOnePromptMessagesPromptsResponseRole
1611
+ r"""The role of the messages author, in this case `assistant`."""
1612
+ content: NotRequired[Nullable[GetOnePromptMessagesPromptsResponseContentTypedDict]]
1613
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
1614
+ refusal: NotRequired[Nullable[str]]
1615
+ r"""The refusal message by the assistant."""
1616
+ name: NotRequired[str]
1617
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1618
+ audio: NotRequired[Nullable[GetOnePromptMessagesAudioTypedDict]]
1619
+ r"""Data about a previous audio response from the model."""
1620
+ tool_calls: NotRequired[List[GetOnePromptMessagesToolCallsTypedDict]]
1621
+ r"""The tool calls generated by the model, such as function calls."""
1622
+
1623
+
1624
+ class GetOnePromptMessagesAssistantMessage(BaseModel):
1625
+ role: GetOnePromptMessagesPromptsResponseRole
1626
+ r"""The role of the messages author, in this case `assistant`."""
1627
+
1628
+ content: OptionalNullable[GetOnePromptMessagesPromptsResponseContent] = UNSET
1629
+ r"""The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."""
1630
+
1631
+ refusal: OptionalNullable[str] = UNSET
1632
+ r"""The refusal message by the assistant."""
1633
+
1634
+ name: Optional[str] = None
1635
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1636
+
1637
+ audio: OptionalNullable[GetOnePromptMessagesAudio] = UNSET
1638
+ r"""Data about a previous audio response from the model."""
1639
+
1640
+ tool_calls: Optional[List[GetOnePromptMessagesToolCalls]] = None
1641
+ r"""The tool calls generated by the model, such as function calls."""
1642
+
1643
+ @model_serializer(mode="wrap")
1644
+ def serialize_model(self, handler):
1645
+ optional_fields = set(["content", "refusal", "name", "audio", "tool_calls"])
1646
+ nullable_fields = set(["content", "refusal", "audio"])
1647
+ serialized = handler(self)
1648
+ m = {}
1649
+
1650
+ for n, f in type(self).model_fields.items():
1651
+ k = f.alias or n
1652
+ val = serialized.get(k)
1653
+ is_nullable_and_explicitly_set = (
1654
+ k in nullable_fields
1655
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
1656
+ )
1657
+
1658
+ if val != UNSET_SENTINEL:
1659
+ if (
1660
+ val is not None
1661
+ or k not in optional_fields
1662
+ or is_nullable_and_explicitly_set
1663
+ ):
1664
+ m[k] = val
1665
+
1666
+ return m
1667
+
1668
+
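Editor's note, a sketch of the unset-versus-null distinction the serializer above encodes: an explicitly passed None survives in the dump because the field was set, while fields left untouched are dropped entirely.

a1 = GetOnePromptMessagesAssistantMessage(role="assistant", content="Done.")
a2 = GetOnePromptMessagesAssistantMessage(role="assistant", content=None)
print(a1.model_dump())  # {'role': 'assistant', 'content': 'Done.'}
print(a2.model_dump())  # {'role': 'assistant', 'content': None}; refusal/audio omitted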
1669
+ GetOnePromptMessagesPromptsRole = Literal["user",]
1670
+ r"""The role of the messages author, in this case `user`."""
1671
+
1672
+
1673
+ GetOnePrompt2PromptsResponse200Type = Literal["file",]
1674
+ r"""The type of the content part. Always `file`."""
1675
+
1676
+
1677
+ GetOnePrompt2PromptsResponse200ApplicationJSONType = Literal["ephemeral",]
1678
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1679
+
1680
+
1681
+ GetOnePrompt2TTL = Literal[
1682
+ "5m",
1683
+ "1h",
1684
+ ]
1685
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1686
+
1687
+ - `5m`: 5 minutes
1688
+ - `1h`: 1 hour
1689
+
1690
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1691
+ """
1692
+
1693
+
1694
+ class GetOnePrompt2CacheControlTypedDict(TypedDict):
1695
+ type: GetOnePrompt2PromptsResponse200ApplicationJSONType
1696
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1697
+ ttl: NotRequired[GetOnePrompt2TTL]
1698
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1699
+
1700
+ - `5m`: 5 minutes
1701
+ - `1h`: 1 hour
1702
+
1703
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1704
+ """
1705
+
1706
+
1707
+ class GetOnePrompt2CacheControl(BaseModel):
1708
+ type: GetOnePrompt2PromptsResponse200ApplicationJSONType
1709
+ r"""Create a cache control breakpoint at this content block. Accepts only the value \"ephemeral\"."""
1710
+
1711
+ ttl: Optional[GetOnePrompt2TTL] = "5m"
1712
+ r"""The time-to-live for the cache control breakpoint. This may be one of the following values:
1713
+
1714
+ - `5m`: 5 minutes
1715
+ - `1h`: 1 hour
1716
+
1717
+ Defaults to `5m`. Only supported by `Anthropic` Claude models.
1718
+ """
1719
+
1720
+ @model_serializer(mode="wrap")
1721
+ def serialize_model(self, handler):
1722
+ optional_fields = set(["ttl"])
1723
+ serialized = handler(self)
1724
+ m = {}
1725
+
1726
+ for n, f in type(self).model_fields.items():
1727
+ k = f.alias or n
1728
+ val = serialized.get(k)
1729
+
1730
+ if val != UNSET_SENTINEL:
1731
+ if val is not None or k not in optional_fields:
1732
+ m[k] = val
1733
+
1734
+ return m
1735
+
1736
+
1737
+ class GetOnePrompt24TypedDict(TypedDict):
1738
+ type: GetOnePrompt2PromptsResponse200Type
1739
+ r"""The type of the content part. Always `file`."""
1740
+ file: FileContentPartSchemaTypedDict
1741
+ r"""File data for the content part. Must contain either file_data or uri, but not both."""
1742
+ cache_control: NotRequired[GetOnePrompt2CacheControlTypedDict]
1743
+
1744
+
1745
+ class GetOnePrompt24(BaseModel):
1746
+ type: GetOnePrompt2PromptsResponse200Type
1747
+ r"""The type of the content part. Always `file`."""
1748
+
1749
+ file: FileContentPartSchema
1750
+ r"""File data for the content part. Must contain either file_data or uri, but not both."""
1751
+
1752
+ cache_control: Optional[GetOnePrompt2CacheControl] = None
1753
+
1754
+ @model_serializer(mode="wrap")
1755
+ def serialize_model(self, handler):
1756
+ optional_fields = set(["cache_control"])
1757
+ serialized = handler(self)
1758
+ m = {}
1759
+
1760
+ for n, f in type(self).model_fields.items():
1761
+ k = f.alias or n
1762
+ val = serialized.get(k)
1763
+
1764
+ if val != UNSET_SENTINEL:
1765
+ if val is not None or k not in optional_fields:
1766
+ m[k] = val
1767
+
1768
+ return m
1769
+
1770
+
1771
+ GetOnePromptContentPrompts2TypedDict = TypeAliasType(
1772
+ "GetOnePromptContentPrompts2TypedDict",
1773
+ Union[
1774
+ AudioContentPartSchemaTypedDict,
1775
+ TextContentPartSchemaTypedDict,
1776
+ ImageContentPartSchemaTypedDict,
1777
+ GetOnePrompt24TypedDict,
1778
+ ],
1779
+ )
1780
+
1781
+
1782
+ GetOnePromptContentPrompts2 = Annotated[
1783
+ Union[
1784
+ Annotated[TextContentPartSchema, Tag("text")],
1785
+ Annotated[ImageContentPartSchema, Tag("image_url")],
1786
+ Annotated[AudioContentPartSchema, Tag("input_audio")],
1787
+ Annotated[GetOnePrompt24, Tag("file")],
1788
+ ],
1789
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
1790
+ ]
1791
+
1792
+
1793
+ GetOnePromptMessagesPromptsContentTypedDict = TypeAliasType(
1794
+ "GetOnePromptMessagesPromptsContentTypedDict",
1795
+ Union[str, List[GetOnePromptContentPrompts2TypedDict]],
1796
+ )
1797
+ r"""The contents of the user message."""
1798
+
1799
+
1800
+ GetOnePromptMessagesPromptsContent = TypeAliasType(
1801
+ "GetOnePromptMessagesPromptsContent", Union[str, List[GetOnePromptContentPrompts2]]
1802
+ )
1803
+ r"""The contents of the user message."""
1804
+
1805
+
1806
+ class GetOnePromptMessagesUserMessageTypedDict(TypedDict):
1807
+ role: GetOnePromptMessagesPromptsRole
1808
+ r"""The role of the messages author, in this case `user`."""
1809
+ content: GetOnePromptMessagesPromptsContentTypedDict
1810
+ r"""The contents of the user message."""
1811
+ name: NotRequired[str]
1812
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1813
+
1814
+
1815
+ class GetOnePromptMessagesUserMessage(BaseModel):
1816
+ role: GetOnePromptMessagesPromptsRole
1817
+ r"""The role of the messages author, in this case `user`."""
1818
+
1819
+ content: GetOnePromptMessagesPromptsContent
1820
+ r"""The contents of the user message."""
1821
+
1822
+ name: Optional[str] = None
1823
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1824
+
1825
+ @model_serializer(mode="wrap")
1826
+ def serialize_model(self, handler):
1827
+ optional_fields = set(["name"])
1828
+ serialized = handler(self)
1829
+ m = {}
1830
+
1831
+ for n, f in type(self).model_fields.items():
1832
+ k = f.alias or n
1833
+ val = serialized.get(k)
1834
+
1835
+ if val != UNSET_SENTINEL:
1836
+ if val is not None or k not in optional_fields:
1837
+ m[k] = val
1838
+
1839
+ return m
1840
+
1841
+
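Editor's note, a small sketch: user content may be a plain string or a list of typed content parts; the string form needs no discriminator and is the common case.

user = GetOnePromptMessagesUserMessage(role="user", content="Summarize this diff")
print(user.model_dump())  # name was never set, so it is omitted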
1842
+ GetOnePromptMessagesRole = Literal["system",]
1843
+ r"""The role of the messages author, in this case `system`."""
1844
+
1845
+
1846
+ GetOnePromptMessagesContentTypedDict = TypeAliasType(
1847
+ "GetOnePromptMessagesContentTypedDict",
1848
+ Union[str, List[TextContentPartSchemaTypedDict]],
1849
+ )
1850
+ r"""The contents of the system message."""
1851
+
1852
+
1853
+ GetOnePromptMessagesContent = TypeAliasType(
1854
+ "GetOnePromptMessagesContent", Union[str, List[TextContentPartSchema]]
1855
+ )
1856
+ r"""The contents of the system message."""
1857
+
1858
+
1859
+ class GetOnePromptMessagesSystemMessageTypedDict(TypedDict):
1860
+ r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
1861
+
1862
+ role: GetOnePromptMessagesRole
1863
+ r"""The role of the messages author, in this case `system`."""
1864
+ content: GetOnePromptMessagesContentTypedDict
1865
+ r"""The contents of the system message."""
1866
+ name: NotRequired[str]
1867
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1868
+
1869
+
1870
+ class GetOnePromptMessagesSystemMessage(BaseModel):
1871
+ r"""Developer-provided instructions that the model should follow, regardless of messages sent by the user."""
1872
+
1873
+ role: GetOnePromptMessagesRole
1874
+ r"""The role of the messages author, in this case `system`."""
1875
+
1876
+ content: GetOnePromptMessagesContent
1877
+ r"""The contents of the system message."""
1878
+
1879
+ name: Optional[str] = None
1880
+ r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role."""
1881
+
1882
+ @model_serializer(mode="wrap")
1883
+ def serialize_model(self, handler):
1884
+ optional_fields = set(["name"])
1885
+ serialized = handler(self)
1886
+ m = {}
1887
+
1888
+ for n, f in type(self).model_fields.items():
1889
+ k = f.alias or n
1890
+ val = serialized.get(k)
1891
+
1892
+ if val != UNSET_SENTINEL:
1893
+ if val is not None or k not in optional_fields:
1894
+ m[k] = val
1895
+
1896
+ return m
1897
+
1898
+
1899
+ GetOnePromptPromptsMessagesTypedDict = TypeAliasType(
1900
+ "GetOnePromptPromptsMessagesTypedDict",
1901
+ Union[
1902
+ GetOnePromptMessagesSystemMessageTypedDict,
1903
+ GetOnePromptMessagesUserMessageTypedDict,
1904
+ GetOnePromptMessagesToolMessageTypedDict,
1905
+ GetOnePromptMessagesAssistantMessageTypedDict,
1906
+ ],
1907
+ )
1908
+
1909
+
1910
+ GetOnePromptPromptsMessages = Annotated[
1911
+ Union[
1912
+ Annotated[GetOnePromptMessagesSystemMessage, Tag("system")],
1913
+ Annotated[GetOnePromptMessagesUserMessage, Tag("user")],
1914
+ Annotated[GetOnePromptMessagesAssistantMessage, Tag("assistant")],
1915
+ Annotated[GetOnePromptMessagesToolMessage, Tag("tool")],
1916
+ ],
1917
+ Discriminator(lambda m: get_discriminator(m, "role", "role")),
1918
+ ]
1919
+
1920
+
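Editor's note, a sketch of validating a raw message list against the role-discriminated union above; each dict is routed to the matching message class by its "role" key. The message texts are illustrative.

from typing import List
from pydantic import TypeAdapter

adapter = TypeAdapter(List[GetOnePromptPromptsMessages])
messages = adapter.validate_python([
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "What changed in this release?"},
])
print([type(m).__name__ for m in messages])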
1921
+ class GetOnePromptPromptFieldTypedDict(TypedDict):
1922
+ r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
1923
+
1924
+ audio: NotRequired[Nullable[GetOnePromptAudioTypedDict]]
1925
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
1926
+ frequency_penalty: NotRequired[Nullable[float]]
1927
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
1928
+ max_tokens: NotRequired[Nullable[int]]
1929
+ r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
1930
+
1931
+ This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
1932
+ """
1933
+ max_completion_tokens: NotRequired[Nullable[int]]
1934
+ r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
1935
+ logprobs: NotRequired[Nullable[bool]]
1936
+ r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
1937
+ top_logprobs: NotRequired[Nullable[int]]
1938
+ r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
1939
+ n: NotRequired[Nullable[int]]
1940
+ r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
1941
+ presence_penalty: NotRequired[Nullable[float]]
1942
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
1943
+ response_format: NotRequired[GetOnePromptResponseFormatTypedDict]
1944
+ r"""An object specifying the format that the model must output"""
1945
+ reasoning_effort: NotRequired[GetOnePromptReasoningEffort]
1946
+ r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
1947
+
1948
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
1949
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
1950
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1951
+ - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
1952
+
1953
+ Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
1954
+ """
1955
+ verbosity: NotRequired[str]
1956
+ r"""Adjusts response verbosity. Lower levels yield shorter answers."""
1957
+ seed: NotRequired[Nullable[float]]
1958
+ r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
1959
+ stop: NotRequired[Nullable[GetOnePromptStopTypedDict]]
1960
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
1961
+ stream_options: NotRequired[Nullable[GetOnePromptStreamOptionsTypedDict]]
1962
+ r"""Options for streaming response. Only set this when you set stream: true."""
1963
+ thinking: NotRequired[GetOnePromptThinkingTypedDict]
1964
+ temperature: NotRequired[Nullable[float]]
1965
+ r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
1966
+ top_p: NotRequired[Nullable[float]]
1967
+ r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
1968
+ top_k: NotRequired[Nullable[float]]
1969
+ r"""Limits the model to consider only the top k most likely tokens at each step."""
1970
+ tool_choice: NotRequired[GetOnePromptToolChoiceTypedDict]
1971
+ r"""Controls which (if any) tool is called by the model."""
1972
+ parallel_tool_calls: NotRequired[bool]
1973
+ r"""Whether to enable parallel function calling during tool use."""
1974
+ modalities: NotRequired[Nullable[List[GetOnePromptModalities]]]
1975
+ r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
1976
+ guardrails: NotRequired[List[GetOnePromptGuardrailsTypedDict]]
1977
+ r"""A list of guardrails to apply to the request."""
1978
+ fallbacks: NotRequired[List[GetOnePromptFallbacksTypedDict]]
1979
+ r"""Array of fallback models to use if primary model fails"""
1980
+ retry: NotRequired[GetOnePromptRetryTypedDict]
1981
+ r"""Retry configuration for the request"""
1982
+ cache: NotRequired[GetOnePromptCacheTypedDict]
1983
+ r"""Cache configuration for the request."""
1984
+ load_balancer: NotRequired[GetOnePromptLoadBalancerTypedDict]
1985
+ r"""Load balancer configuration for the request."""
1986
+ timeout: NotRequired[GetOnePromptTimeoutTypedDict]
1987
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
1988
+ messages: NotRequired[List[GetOnePromptPromptsMessagesTypedDict]]
1989
+ r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
1990
+ model: NotRequired[Nullable[str]]
1991
+ r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
1992
+ version: NotRequired[str]
1993
+
1994
+
1995
+ class GetOnePromptPromptField(BaseModel):
1996
+ r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
1997
+
1998
+ audio: OptionalNullable[GetOnePromptAudio] = UNSET
1999
+ r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
2000
+
2001
+ frequency_penalty: OptionalNullable[float] = UNSET
2002
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."""
2003
+
2004
+ max_tokens: OptionalNullable[int] = UNSET
2005
+ r"""`[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
2006
+
2007
+ This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
2008
+ """
2009
+
2010
+ max_completion_tokens: OptionalNullable[int] = UNSET
2011
+ r"""An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens"""
2012
+
2013
+ logprobs: OptionalNullable[bool] = UNSET
2014
+ r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."""
2015
+
2016
+ top_logprobs: OptionalNullable[int] = UNSET
2017
+ r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."""
2018
+
2019
+ n: OptionalNullable[int] = UNSET
2020
+ r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."""
2021
+
2022
+ presence_penalty: OptionalNullable[float] = UNSET
2023
+ r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."""
2024
+
2025
+ response_format: Optional[GetOnePromptResponseFormat] = None
2026
+ r"""An object specifying the format that the model must output"""
2027
+
2028
+ reasoning_effort: Optional[GetOnePromptReasoningEffort] = None
2029
+ r"""Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
2030
+
2031
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
2032
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
2033
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
2034
+ - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
2035
+
2036
+ Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
2037
+ """
2038
+
2039
+ verbosity: Optional[str] = None
2040
+ r"""Adjusts response verbosity. Lower levels yield shorter answers."""
2041
+
2042
+ seed: OptionalNullable[float] = UNSET
2043
+ r"""If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result."""
2044
+
2045
+ stop: OptionalNullable[GetOnePromptStop] = UNSET
2046
+ r"""Up to 4 sequences where the API will stop generating further tokens."""
2047
+
2048
+ stream_options: OptionalNullable[GetOnePromptStreamOptions] = UNSET
2049
+ r"""Options for streaming response. Only set this when you set stream: true."""
2050
+
2051
+ thinking: Optional[GetOnePromptThinking] = None
2052
+
2053
+ temperature: OptionalNullable[float] = UNSET
2054
+ r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."""
2055
+
2056
+ top_p: OptionalNullable[float] = UNSET
2057
+ r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass."""
2058
+
2059
+ top_k: OptionalNullable[float] = UNSET
2060
+ r"""Limits the model to consider only the top k most likely tokens at each step."""
2061
+
2062
+ tool_choice: Optional[GetOnePromptToolChoice] = None
2063
+ r"""Controls which (if any) tool is called by the model."""
2064
+
2065
+ parallel_tool_calls: Optional[bool] = None
2066
+ r"""Whether to enable parallel function calling during tool use."""
2067
+
2068
+ modalities: OptionalNullable[List[GetOnePromptModalities]] = UNSET
2069
+ r"""Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"]."""
2070
+
2071
+ guardrails: Optional[List[GetOnePromptGuardrails]] = None
2072
+ r"""A list of guardrails to apply to the request."""
2073
+
2074
+ fallbacks: Optional[List[GetOnePromptFallbacks]] = None
2075
+ r"""Array of fallback models to use if primary model fails"""
2076
+
2077
+ retry: Optional[GetOnePromptRetry] = None
2078
+ r"""Retry configuration for the request"""
2079
+
2080
+ cache: Optional[GetOnePromptCache] = None
2081
+ r"""Cache configuration for the request."""
2082
+
2083
+ load_balancer: Optional[GetOnePromptLoadBalancer] = None
2084
+ r"""Load balancer configuration for the request."""
2085
+
2086
+ timeout: Optional[GetOnePromptTimeout] = None
2087
+ r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
2088
+
2089
+ messages: Optional[List[GetOnePromptPromptsMessages]] = None
2090
+ r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
2091
+
2092
+ model: OptionalNullable[str] = UNSET
2093
+ r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`."""
2094
+
2095
+ version: Optional[str] = None
2096
+
2097
+ @model_serializer(mode="wrap")
2098
+ def serialize_model(self, handler):
2099
+ optional_fields = set(
2100
+ [
2101
+ "audio",
2102
+ "frequency_penalty",
2103
+ "max_tokens",
2104
+ "max_completion_tokens",
2105
+ "logprobs",
2106
+ "top_logprobs",
2107
+ "n",
2108
+ "presence_penalty",
2109
+ "response_format",
2110
+ "reasoning_effort",
2111
+ "verbosity",
2112
+ "seed",
2113
+ "stop",
2114
+ "stream_options",
2115
+ "thinking",
2116
+ "temperature",
2117
+ "top_p",
2118
+ "top_k",
2119
+ "tool_choice",
2120
+ "parallel_tool_calls",
2121
+ "modalities",
2122
+ "guardrails",
2123
+ "fallbacks",
2124
+ "retry",
2125
+ "cache",
2126
+ "load_balancer",
2127
+ "timeout",
2128
+ "messages",
2129
+ "model",
2130
+ "version",
2131
+ ]
2132
+ )
2133
+ nullable_fields = set(
2134
+ [
2135
+ "audio",
2136
+ "frequency_penalty",
2137
+ "max_tokens",
2138
+ "max_completion_tokens",
2139
+ "logprobs",
2140
+ "top_logprobs",
2141
+ "n",
2142
+ "presence_penalty",
2143
+ "seed",
2144
+ "stop",
2145
+ "stream_options",
2146
+ "temperature",
2147
+ "top_p",
2148
+ "top_k",
2149
+ "modalities",
2150
+ "model",
2151
+ ]
2152
+ )
2153
+ serialized = handler(self)
2154
+ m = {}
2155
+
2156
+ for n, f in type(self).model_fields.items():
2157
+ k = f.alias or n
2158
+ val = serialized.get(k)
2159
+ is_nullable_and_explicitly_set = (
2160
+ k in nullable_fields
2161
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
2162
+ )
2163
+
2164
+ if val != UNSET_SENTINEL:
2165
+ if (
2166
+ val is not None
2167
+ or k not in optional_fields
2168
+ or is_nullable_and_explicitly_set
2169
+ ):
2170
+ m[k] = val
2171
+
2172
+ return m
2173
+
2174
+
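Editor's note, a sketch of a minimal prompt configuration built from the model above; the model ID and messages are illustrative, and every OptionalNullable field left unset disappears from the dump.

prompt_field = GetOnePromptPromptField(
    model="openai/gpt-4o",  # illustrative model ID
    temperature=0.2,
    messages=[
        GetOnePromptMessagesSystemMessage(role="system", content="Answer briefly."),
        GetOnePromptMessagesUserMessage(role="user", content="Ping?"),
    ],
)
print(prompt_field.model_dump())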
2175
+ GetOnePromptUseCases = Literal[
2176
+ "Agents simulations",
2177
+ "Agents",
2178
+ "API interaction",
2179
+ "Autonomous Agents",
2180
+ "Chatbots",
2181
+ "Classification",
2182
+ "Code understanding",
2183
+ "Code writing",
2184
+ "Conversation",
2185
+ "Documents QA",
2186
+ "Evaluation",
2187
+ "Extraction",
2188
+ "Multi-modal",
2189
+ "Self-checking",
2190
+ "Sentiment analysis",
2191
+ "SQL",
2192
+ "Summarization",
2193
+ "Tagging",
2194
+ "Translation (document)",
2195
+ "Translation (sentences)",
2196
+ ]
2197
+
2198
+
2199
+ GetOnePromptLanguage = Literal[
2200
+ "Chinese",
2201
+ "Dutch",
2202
+ "English",
2203
+ "French",
2204
+ "German",
2205
+ "Russian",
2206
+ "Spanish",
2207
+ ]
2208
+ r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
2209
+
2210
+
2211
+ class GetOnePromptMetadataTypedDict(TypedDict):
2212
+ use_cases: NotRequired[List[GetOnePromptUseCases]]
2213
+ r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
2214
+ language: NotRequired[Nullable[GetOnePromptLanguage]]
2215
+ r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
2216
+
2217
+
2218
+ class GetOnePromptMetadata(BaseModel):
2219
+ use_cases: Optional[List[GetOnePromptUseCases]] = None
2220
+ r"""A list of use cases that the prompt is meant to be used for. Use this field to categorize the prompt for your own purpose"""
2221
+
2222
+ language: OptionalNullable[GetOnePromptLanguage] = UNSET
2223
+ r"""The language that the prompt is written in. Use this field to categorize the prompt for your own purpose"""
2224
+
2225
+ @model_serializer(mode="wrap")
2226
+ def serialize_model(self, handler):
2227
+ optional_fields = set(["use_cases", "language"])
2228
+ nullable_fields = set(["language"])
2229
+ serialized = handler(self)
2230
+ m = {}
2231
+
2232
+ for n, f in type(self).model_fields.items():
2233
+ k = f.alias or n
2234
+ val = serialized.get(k)
2235
+ is_nullable_and_explicitly_set = (
2236
+ k in nullable_fields
2237
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
2238
+ )
2239
+
2240
+ if val != UNSET_SENTINEL:
2241
+ if (
2242
+ val is not None
2243
+ or k not in optional_fields
2244
+ or is_nullable_and_explicitly_set
2245
+ ):
2246
+ m[k] = val
2247
+
2248
+ return m
2249
+
2250
+
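Editor's note, a sketch using values taken from the literals above; `language` is nullable, so an explicitly passed None would be preserved, while leaving it unset drops it from the dump.

meta = GetOnePromptMetadata(use_cases=["Summarization", "Chatbots"], language="English")
print(meta.model_dump())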
2251
+ class GetOnePromptPromptTypedDict(TypedDict):
2252
+ r"""A prompt entity with configuration, metadata, and versioning."""
2253
+
2254
+ id: str
2255
+ type: GetOnePromptType
2256
+ owner: str
2257
+ domain_id: str
2258
+ created: str
2259
+ updated: str
2260
+ display_name: str
2261
+ r"""The prompt’s name, meant to be displayable in the UI."""
2262
+ prompt: GetOnePromptPromptFieldTypedDict
2263
+ r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
2264
+ created_by_id: NotRequired[Nullable[str]]
2265
+ updated_by_id: NotRequired[Nullable[str]]
2266
+ description: NotRequired[Nullable[str]]
2267
+ r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
2268
+ prompt_config: NotRequired[GetOnePromptPromptConfigTypedDict]
2269
+ r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
2270
+ metadata: NotRequired[GetOnePromptMetadataTypedDict]
2271
+
2272
+
2273
+ class GetOnePromptPrompt(BaseModel):
2274
+ r"""A prompt entity with configuration, metadata, and versioning."""
2275
+
2276
+ id: Annotated[str, pydantic.Field(alias="_id")]
2277
+
2278
+ type: GetOnePromptType
2279
+
2280
+ owner: str
2281
+
2282
+ domain_id: str
2283
+
2284
+ created: str
2285
+
2286
+ updated: str
2287
+
2288
+ display_name: str
2289
+ r"""The prompt’s name, meant to be displayable in the UI."""
2290
+
2291
+ prompt: GetOnePromptPromptField
2292
+ r"""Prompt configuration with model and messages. Use this instead of prompt_config."""
2293
+
2294
+ created_by_id: OptionalNullable[str] = UNSET
2295
+
2296
+ updated_by_id: OptionalNullable[str] = UNSET
2297
+
2298
+ description: OptionalNullable[str] = UNSET
2299
+ r"""The prompt’s description, meant to be displayable in the UI. Use this field to optionally store a long form explanation of the prompt for your own purpose"""
2300
+
2301
+ prompt_config: Annotated[
2302
+ Optional[GetOnePromptPromptConfig],
2303
+ pydantic.Field(
2304
+ deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
2305
+ ),
2306
+ ] = None
2307
+ r"""[DEPRECATED] Use the `prompt` property instead. A list of messages compatible with the openAI schema."""
2308
+
2309
+ metadata: Optional[GetOnePromptMetadata] = None
2310
+
2311
+ @model_serializer(mode="wrap")
2312
+ def serialize_model(self, handler):
2313
+ optional_fields = set(
2314
+ [
2315
+ "created_by_id",
2316
+ "updated_by_id",
2317
+ "description",
2318
+ "prompt_config",
2319
+ "metadata",
2320
+ ]
2321
+ )
2322
+ nullable_fields = set(["created_by_id", "updated_by_id", "description"])
2323
+ serialized = handler(self)
879
2324
  m = {}
880
2325
 
881
2326
  for n, f in type(self).model_fields.items():
882
2327
  k = f.alias or n
883
2328
  val = serialized.get(k)
884
- serialized.pop(k, None)
885
-
886
- optional_nullable = k in optional_fields and k in nullable_fields
887
- is_set = (
888
- self.__pydantic_fields_set__.intersection({n})
889
- or k in null_default_fields
890
- ) # pylint: disable=no-member
891
-
892
- if val is not None and val != UNSET_SENTINEL:
893
- m[k] = val
894
- elif val != UNSET_SENTINEL and (
895
- not k in optional_fields or (optional_nullable and is_set)
896
- ):
897
- m[k] = val
2329
+ is_nullable_and_explicitly_set = (
2330
+ k in nullable_fields
2331
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
2332
+ )
2333
+
2334
+ if val != UNSET_SENTINEL:
2335
+ if (
2336
+ val is not None
2337
+ or k not in optional_fields
2338
+ or is_nullable_and_explicitly_set
2339
+ ):
2340
+ m[k] = val
898
2341
 
899
2342
  return m
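Editor's note, a sketch of validating a raw API payload into the prompt entity above: the wire key `_id` maps to the `id` attribute via the alias, and the nested `prompt` dict validates into GetOnePromptPromptField. The `type` value and all identifiers are placeholders, not values confirmed by this diff.

payload = {
    "_id": "prompt_123",                   # hypothetical identifier
    "type": "prompt",                      # assumed GetOnePromptType member
    "owner": "workspace_abc",
    "domain_id": "domain_1",
    "created": "2025-01-01T00:00:00Z",
    "updated": "2025-01-02T00:00:00Z",
    "display_name": "Release notes summarizer",
    "prompt": {"model": "openai/gpt-4o", "messages": []},
}
prompt = GetOnePromptPrompt.model_validate(payload)
print(prompt.id, prompt.prompt.model)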