orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
@@ -0,0 +1,562 @@
1
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
+
3
+ from __future__ import annotations
4
+ from .publiccontact import PublicContact, PublicContactTypedDict
5
+ from .publicidentity import PublicIdentity, PublicIdentityTypedDict
6
+ from dataclasses import dataclass, field
7
+ import httpx
8
+ import io
9
+ from orq_ai_sdk.models import OrqError
10
+ from orq_ai_sdk.types import BaseModel, Nullable, UNSET_SENTINEL
11
+ from orq_ai_sdk.utils import FieldMetadata, MultipartFormMetadata
12
+ import pydantic
13
+ from pydantic import model_serializer
14
+ from typing import IO, List, Literal, Optional, Union
15
+ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
16
+
17
+
18
+ CreateTranscriptionResponseFormat = Literal[
19
+ "json",
20
+ "text",
21
+ "srt",
22
+ "verbose_json",
23
+ "vtt",
24
+ ]
25
+ r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""
26
+
27
+
28
+ TimestampsGranularity = Literal[
29
+ "none",
30
+ "word",
31
+ "character",
32
+ ]
33
+ r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""
34
+
35
+
36
+ TimestampGranularities = Literal[
37
+ "word",
38
+ "segment",
39
+ ]
40
+
41
+
42
class CreateTranscriptionFallbacksTypedDict(TypedDict):
    r"""TypedDict mirror of ``CreateTranscriptionFallbacks`` for request construction."""

    model: str
    r"""Fallback model identifier"""
45
+
46
+
47
class CreateTranscriptionFallbacks(BaseModel):
    r"""A single fallback-model entry used when the primary model fails."""

    model: str
    r"""Fallback model identifier"""
50
+
51
+
52
class CreateTranscriptionRetryTypedDict(TypedDict):
    r"""Retry configuration for the request"""

    count: NotRequired[float]
    r"""Number of retry attempts (1-5)"""
    on_codes: NotRequired[List[float]]
    r"""HTTP status codes that trigger retry logic"""
59
+
60
+
61
class CreateTranscriptionRetry(BaseModel):
    r"""Retry configuration for the request"""

    count: Optional[float] = 3
    r"""Number of retry attempts (1-5)"""

    on_codes: Optional[List[float]] = None
    r"""HTTP status codes that trigger retry logic"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit UNSET sentinels entirely, and omit optional fields whose
        # value is None, so they never appear on the wire.
        optional_fields = {"count", "on_codes"}
        serialized = handler(self)
        payload = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            payload[key] = value

        return payload
85
+
86
+
87
# Discriminator for the load-balancer configuration; only weight-based
# balancing is defined in this schema.
CreateTranscriptionLoadBalancerType = Literal["weight_based",]
88
+
89
+
90
class CreateTranscriptionLoadBalancerModelsTypedDict(TypedDict):
    r"""TypedDict mirror of ``CreateTranscriptionLoadBalancerModels``."""

    model: str
    r"""Model identifier for load balancing"""
    weight: NotRequired[float]
    r"""Weight assigned to this model for load balancing"""
95
+
96
+
97
class CreateTranscriptionLoadBalancerModels(BaseModel):
    r"""One weighted model entry in a load-balancer configuration."""

    model: str
    r"""Model identifier for load balancing"""

    weight: Optional[float] = 0.5
    r"""Weight assigned to this model for load balancing"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Skip UNSET sentinels, and skip `weight` when it is None so the
        # optional field is dropped rather than serialized as null.
        optional_fields = {"weight"}
        serialized = handler(self)
        payload = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            payload[key] = value

        return payload
119
+
120
+
121
class CreateTranscriptionLoadBalancer1TypedDict(TypedDict):
    r"""TypedDict mirror of ``CreateTranscriptionLoadBalancer1``."""

    type: CreateTranscriptionLoadBalancerType
    models: List[CreateTranscriptionLoadBalancerModelsTypedDict]
124
+
125
+
126
class CreateTranscriptionLoadBalancer1(BaseModel):
    r"""Weight-based load-balancer configuration: a type tag plus weighted models."""

    type: CreateTranscriptionLoadBalancerType

    models: List[CreateTranscriptionLoadBalancerModels]
130
+
131
+
132
# Single-variant aliases kept for schema parity with other operations.
CreateTranscriptionLoadBalancerTypedDict = CreateTranscriptionLoadBalancer1TypedDict
r"""Array of models with weights for load balancing requests"""


CreateTranscriptionLoadBalancer = CreateTranscriptionLoadBalancer1
r"""Array of models with weights for load balancing requests"""
138
+
139
+
140
class CreateTranscriptionTimeoutTypedDict(TypedDict):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    call_timeout: float
    r"""Timeout value in milliseconds"""
145
+
146
+
147
class CreateTranscriptionTimeout(BaseModel):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    call_timeout: float
    r"""Timeout value in milliseconds"""
151
+ r"""Timeout value in milliseconds"""
152
+
153
+
154
class CreateTranscriptionOrqTypedDict(TypedDict):
    r"""TypedDict mirror of ``CreateTranscriptionOrq`` (orq platform options)."""

    name: NotRequired[str]
    r"""The name to display on the trace. If not specified, the default system name will be used."""
    fallbacks: NotRequired[List[CreateTranscriptionFallbacksTypedDict]]
    r"""Array of fallback models to use if primary model fails"""
    retry: NotRequired[CreateTranscriptionRetryTypedDict]
    r"""Retry configuration for the request"""
    identity: NotRequired[PublicIdentityTypedDict]
    r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
    contact: NotRequired[PublicContactTypedDict]
    r"""@deprecated Use identity instead. Information about the contact making the request."""
    load_balancer: NotRequired[CreateTranscriptionLoadBalancerTypedDict]
    r"""Array of models with weights for load balancing requests"""
    timeout: NotRequired[CreateTranscriptionTimeoutTypedDict]
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
169
+
170
+
171
class CreateTranscriptionOrq(BaseModel):
    r"""Platform options carried under the request's ``orq`` key (tracing name, fallbacks, retry, identity, load balancing, timeout)."""

    name: Optional[str] = None
    r"""The name to display on the trace. If not specified, the default system name will be used."""

    fallbacks: Optional[List[CreateTranscriptionFallbacks]] = None
    r"""Array of fallback models to use if primary model fails"""

    retry: Optional[CreateTranscriptionRetry] = None
    r"""Retry configuration for the request"""

    identity: Optional[PublicIdentity] = None
    r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""

    contact: Annotated[
        Optional[PublicContact],
        pydantic.Field(
            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
        ),
    ] = None
    r"""@deprecated Use identity instead. Information about the contact making the request."""

    load_balancer: Optional[CreateTranscriptionLoadBalancer] = None
    r"""Array of models with weights for load balancing requests"""

    timeout: Optional[CreateTranscriptionTimeout] = None
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Every field here is optional: drop UNSET sentinels and drop any
        # field that is None so it is omitted from the payload.
        optional_fields = {
            "name",
            "fallbacks",
            "retry",
            "identity",
            "contact",
            "load_balancer",
            "timeout",
        }
        serialized = handler(self)
        payload = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            payload[key] = value

        return payload
223
+
224
+
225
class CreateTranscriptionFileTypedDict(TypedDict):
    r"""TypedDict mirror of ``CreateTranscriptionFile`` (multipart file upload)."""

    file_name: str
    content: Union[bytes, IO[bytes], io.BufferedReader]
    content_type: NotRequired[str]
229
+
230
+
231
class CreateTranscriptionFile(BaseModel):
    r"""Multipart file part for the transcription request: file name, raw content, and optional Content-Type header."""

    file_name: Annotated[
        str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True)
    ]

    content: Annotated[
        Union[bytes, IO[bytes], io.BufferedReader],
        pydantic.Field(alias=""),
        FieldMetadata(multipart=MultipartFormMetadata(content=True)),
    ]

    content_type: Annotated[
        Optional[str],
        pydantic.Field(alias="Content-Type"),
        FieldMetadata(multipart=True),
    ] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Bug fix: the serialized key for a field is its alias when one is
        # set (`k = f.alias or n`), so the optional-field set must contain
        # the alias "Content-Type". The generated value "contentType"
        # matched neither the alias nor the field name, which caused an
        # unset content_type to be serialized as None instead of omitted —
        # inconsistent with every other model in this module.
        optional_fields = set(["Content-Type"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
263
+
264
+
265
class CreateTranscriptionRequestBodyTypedDict(TypedDict):
    r"""Transcribes audio into the input language."""

    model: str
    r"""ID of the model to use"""
    prompt: NotRequired[str]
    r"""An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language."""
    enable_logging: NotRequired[bool]
    r"""When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers."""
    diarize: NotRequired[bool]
    r"""Whether to annotate which speaker is currently talking in the uploaded file."""
    response_format: NotRequired[CreateTranscriptionResponseFormat]
    r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""
    tag_audio_events: NotRequired[bool]
    r"""Whether to tag audio events like (laughter), (footsteps), etc. in the transcription."""
    num_speakers: NotRequired[float]
    r"""The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32."""
    timestamps_granularity: NotRequired[TimestampsGranularity]
    r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""
    temperature: NotRequired[float]
    r"""The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit."""
    language: NotRequired[str]
    r"""The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency."""
    timestamp_granularities: NotRequired[List[TimestampGranularities]]
    r"""The timestamp granularities to populate for this transcription. response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: \"word\" or \"segment\". Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency."""
    orq: NotRequired[CreateTranscriptionOrqTypedDict]
    file: NotRequired[CreateTranscriptionFileTypedDict]
    r"""The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm."""
293
+
294
+
295
class CreateTranscriptionRequestBody(BaseModel):
    r"""Transcribes audio into the input language."""

    # Plain multipart text field (no JSON encoding).
    model: Annotated[str, FieldMetadata(multipart=True)]
    r"""ID of the model to use"""

    prompt: Annotated[Optional[str], FieldMetadata(multipart=True)] = None
    r"""An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language."""

    enable_logging: Annotated[Optional[bool], FieldMetadata(multipart=True)] = True
    r"""When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers."""

    diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False
    r"""Whether to annotate which speaker is currently talking in the uploaded file."""

    response_format: Annotated[
        Optional[CreateTranscriptionResponseFormat], FieldMetadata(multipart=True)
    ] = None
    r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""

    tag_audio_events: Annotated[Optional[bool], FieldMetadata(multipart=True)] = True
    r"""Whether to tag audio events like (laughter), (footsteps), etc. in the transcription."""

    num_speakers: Annotated[Optional[float], FieldMetadata(multipart=True)] = None
    r"""The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32."""

    timestamps_granularity: Annotated[
        Optional[TimestampsGranularity], FieldMetadata(multipart=True)
    ] = "word"
    r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""

    temperature: Annotated[Optional[float], FieldMetadata(multipart=True)] = None
    r"""The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit."""

    language: Annotated[Optional[str], FieldMetadata(multipart=True)] = None
    r"""The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency."""

    timestamp_granularities: Annotated[
        Optional[List[TimestampGranularities]], FieldMetadata(multipart=True)
    ] = None
    r"""The timestamp granularities to populate for this transcription. response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: \"word\" or \"segment\". Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency."""

    # Serialized into the form as a JSON-encoded part.
    orq: Annotated[
        Optional[CreateTranscriptionOrq],
        FieldMetadata(multipart=MultipartFormMetadata(json=True)),
    ] = None

    # Serialized as the file part of the multipart form.
    file: Annotated[
        Optional[CreateTranscriptionFile],
        FieldMetadata(multipart=MultipartFormMetadata(file=True)),
    ] = None
    r"""The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Every field except the required `model` is optional: drop UNSET
        # sentinels, and drop optional fields whose value is None so they
        # are omitted from the serialized form.
        optional_fields = set(
            [
                "prompt",
                "enable_logging",
                "diarize",
                "response_format",
                "tag_audio_events",
                "num_speakers",
                "timestamps_granularity",
                "temperature",
                "language",
                "timestamp_granularities",
                "orq",
                "file",
            ]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
378
+
379
+
380
class CreateTranscriptionErrorTypedDict(TypedDict):
    r"""TypedDict mirror of ``CreateTranscriptionError``."""

    message: str
    type: str
    param: Nullable[str]
    code: str
385
+
386
+
387
class CreateTranscriptionError(BaseModel):
    r"""Structured error payload returned by the transcription route."""

    message: str

    type: str

    param: Nullable[str]

    code: str

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # All fields are required here, so only UNSET sentinels are
        # filtered out; explicit None (for the nullable `param`) is kept.
        serialized = handler(self)
        payload = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)
            if value != UNSET_SENTINEL:
                payload[key] = value

        return payload
409
+
410
+
411
class CreateTranscriptionRouterAudioTranscriptionsResponseBodyData(BaseModel):
    r"""Wrapper carrying the structured error for the validation-error response."""

    error: CreateTranscriptionError
413
+
414
+
415
@dataclass(unsafe_hash=True)
class CreateTranscriptionRouterAudioTranscriptionsResponseBody(OrqError):
    r"""Returns validation error"""

    # Parsed error payload; hash=False excludes it from the generated
    # __hash__ so hashing relies on the remaining (base) fields.
    data: CreateTranscriptionRouterAudioTranscriptionsResponseBodyData = field(
        hash=False
    )

    def __init__(
        self,
        data: CreateTranscriptionRouterAudioTranscriptionsResponseBodyData,
        raw_response: httpx.Response,
        body: Optional[str] = None,
    ):
        # Prefer the structured message; fall back to the raw body (or the
        # response text) when the message is empty.
        fallback = body or raw_response.text
        message = str(data.error.message) or fallback
        super().__init__(message, raw_response, body)
        # NOTE(review): object.__setattr__ bypasses any __setattr__ defined
        # by OrqError/dataclass machinery — confirm OrqError's config before
        # changing this.
        object.__setattr__(self, "data", data)
433
+
434
+
435
class WordsTypedDict(TypedDict):
    r"""TypedDict mirror of ``Words`` (per-word timing entry)."""

    word: NotRequired[str]
    start: NotRequired[float]
    end: NotRequired[float]
439
+
440
+
441
class Words(BaseModel):
    r"""A single word of the transcript with its start/end offsets."""

    word: Optional[str] = None

    start: Optional[float] = None

    end: Optional[float] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # All fields are optional: drop UNSET sentinels and None values.
        optional_fields = {"word", "start", "end"}
        serialized = handler(self)
        payload = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            payload[key] = value

        return payload
463
+
464
+
465
class SegmentsTypedDict(TypedDict):
    r"""TypedDict mirror of ``Segments`` (per-segment transcript details)."""

    id: float
    seek: float
    start: float
    end: float
    text: str
    tokens: List[float]
    temperature: float
    avg_logprob: float
    compression_ratio: float
    no_speech_prob: float
476
+
477
+
478
class Segments(BaseModel):
    r"""One transcript segment with timing, token, and confidence statistics."""

    id: float

    seek: float

    start: float

    end: float

    text: str

    tokens: List[float]

    temperature: float

    avg_logprob: float

    compression_ratio: float

    no_speech_prob: float
498
+
499
+
500
class CreateTranscriptionResponseBody2TypedDict(TypedDict):
    r"""TypedDict mirror of ``CreateTranscriptionResponseBody2`` (verbose transcription)."""

    text: str
    task: NotRequired[str]
    language: NotRequired[str]
    duration: NotRequired[float]
    words: NotRequired[List[WordsTypedDict]]
    segments: NotRequired[List[SegmentsTypedDict]]
507
+
508
+
509
class CreateTranscriptionResponseBody2(BaseModel):
    r"""Verbose transcription response: text plus optional task, language, duration, and word/segment details."""

    text: str

    task: Optional[str] = None

    language: Optional[str] = None

    duration: Optional[float] = None

    words: Optional[List[Words]] = None

    segments: Optional[List[Segments]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # `text` is required; every other field is optional and omitted
        # when None. UNSET sentinels are always dropped.
        optional_fields = {"task", "language", "duration", "words", "segments"}
        serialized = handler(self)
        payload = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            payload[key] = value

        return payload
537
+
538
+
539
class CreateTranscriptionResponseBody1TypedDict(TypedDict):
    r"""TypedDict mirror of ``CreateTranscriptionResponseBody1`` (plain text result)."""

    text: str
541
+
542
+
543
class CreateTranscriptionResponseBody1(BaseModel):
    r"""Minimal transcription response carrying only the transcript text."""

    text: str
545
+
546
+
547
# Union of the possible success payloads: minimal, verbose, or raw string
# (for text/srt/vtt response formats).
CreateTranscriptionResponseBodyTypedDict = TypeAliasType(
    "CreateTranscriptionResponseBodyTypedDict",
    Union[
        CreateTranscriptionResponseBody1TypedDict,
        CreateTranscriptionResponseBody2TypedDict,
        str,
    ],
)
r"""Returns the transcription or verbose transcription"""


CreateTranscriptionResponseBody = TypeAliasType(
    "CreateTranscriptionResponseBody",
    Union[CreateTranscriptionResponseBody1, CreateTranscriptionResponseBody2, str],
)
r"""Returns the transcription or verbose transcription"""