orq-ai-sdk 4.2.0rc49__py3-none-any.whl → 4.2.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/agents.py +186 -186
  4. orq_ai_sdk/audio.py +30 -0
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +438 -0
  7. orq_ai_sdk/contacts.py +43 -886
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +364 -0
  10. orq_ai_sdk/embeddings.py +344 -0
  11. orq_ai_sdk/generations.py +370 -0
  12. orq_ai_sdk/images.py +28 -0
  13. orq_ai_sdk/models/__init__.py +3839 -424
  14. orq_ai_sdk/models/conversationresponse.py +1 -1
  15. orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
  16. orq_ai_sdk/models/createagentrequestop.py +768 -12
  17. orq_ai_sdk/models/createagentresponse.py +68 -2
  18. orq_ai_sdk/models/createchatcompletionop.py +538 -313
  19. orq_ai_sdk/models/createcompletionop.py +2078 -0
  20. orq_ai_sdk/models/createcontactop.py +5 -10
  21. orq_ai_sdk/models/createconversationop.py +1 -1
  22. orq_ai_sdk/models/createconversationresponseop.py +2 -2
  23. orq_ai_sdk/models/createdatasetitemop.py +4 -4
  24. orq_ai_sdk/models/createdatasetop.py +1 -1
  25. orq_ai_sdk/models/createdatasourceop.py +1 -1
  26. orq_ai_sdk/models/createembeddingop.py +579 -0
  27. orq_ai_sdk/models/createevalop.py +14 -14
  28. orq_ai_sdk/models/createidentityop.py +1 -1
  29. orq_ai_sdk/models/createimageeditop.py +715 -0
  30. orq_ai_sdk/models/createimageop.py +228 -82
  31. orq_ai_sdk/models/createimagevariationop.py +706 -0
  32. orq_ai_sdk/models/creatememoryop.py +4 -2
  33. orq_ai_sdk/models/createmoderationop.py +521 -0
  34. orq_ai_sdk/models/createpromptop.py +375 -6
  35. orq_ai_sdk/models/creatererankop.py +608 -0
  36. orq_ai_sdk/models/createresponseop.py +2567 -0
  37. orq_ai_sdk/models/createspeechop.py +466 -0
  38. orq_ai_sdk/models/createtoolop.py +6 -6
  39. orq_ai_sdk/models/createtranscriptionop.py +732 -0
  40. orq_ai_sdk/models/createtranslationop.py +702 -0
  41. orq_ai_sdk/models/deploymentgetconfigop.py +17 -7
  42. orq_ai_sdk/models/deploymentsop.py +1 -0
  43. orq_ai_sdk/models/deploymentstreamop.py +7 -0
  44. orq_ai_sdk/models/filegetop.py +1 -1
  45. orq_ai_sdk/models/filelistop.py +1 -1
  46. orq_ai_sdk/models/fileuploadop.py +1 -1
  47. orq_ai_sdk/models/generateconversationnameop.py +1 -1
  48. orq_ai_sdk/models/getallmemoriesop.py +4 -2
  49. orq_ai_sdk/models/getallpromptsop.py +188 -3
  50. orq_ai_sdk/models/getalltoolsop.py +6 -6
  51. orq_ai_sdk/models/getevalsop.py +17 -17
  52. orq_ai_sdk/models/getonepromptop.py +188 -3
  53. orq_ai_sdk/models/getpromptversionop.py +188 -3
  54. orq_ai_sdk/models/invokedeploymentrequest.py +11 -4
  55. orq_ai_sdk/models/listagentsop.py +372 -0
  56. orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
  57. orq_ai_sdk/models/listdatasetsop.py +1 -1
  58. orq_ai_sdk/models/listdatasourcesop.py +1 -1
  59. orq_ai_sdk/models/listidentitiesop.py +1 -1
  60. orq_ai_sdk/models/listmodelsop.py +1 -0
  61. orq_ai_sdk/models/listpromptversionsop.py +188 -3
  62. orq_ai_sdk/models/partdoneevent.py +1 -1
  63. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  64. orq_ai_sdk/models/publiccontact.py +9 -3
  65. orq_ai_sdk/models/publicidentity.py +62 -0
  66. orq_ai_sdk/models/reasoningpart.py +1 -1
  67. orq_ai_sdk/models/responsedoneevent.py +14 -11
  68. orq_ai_sdk/models/retrieveagentrequestop.py +382 -0
  69. orq_ai_sdk/models/retrievedatapointop.py +4 -4
  70. orq_ai_sdk/models/retrievedatasetop.py +1 -1
  71. orq_ai_sdk/models/retrievedatasourceop.py +1 -1
  72. orq_ai_sdk/models/retrieveidentityop.py +1 -1
  73. orq_ai_sdk/models/retrievememoryop.py +4 -2
  74. orq_ai_sdk/models/retrievetoolop.py +6 -6
  75. orq_ai_sdk/models/runagentop.py +379 -9
  76. orq_ai_sdk/models/streamrunagentop.py +385 -9
  77. orq_ai_sdk/models/updateagentop.py +770 -12
  78. orq_ai_sdk/models/updateconversationop.py +1 -1
  79. orq_ai_sdk/models/updatedatapointop.py +4 -4
  80. orq_ai_sdk/models/updatedatasetop.py +1 -1
  81. orq_ai_sdk/models/updatedatasourceop.py +1 -1
  82. orq_ai_sdk/models/updateevalop.py +14 -14
  83. orq_ai_sdk/models/updateidentityop.py +1 -1
  84. orq_ai_sdk/models/updatememoryop.py +4 -2
  85. orq_ai_sdk/models/updatepromptop.py +375 -6
  86. orq_ai_sdk/models/updatetoolop.py +7 -7
  87. orq_ai_sdk/moderations.py +218 -0
  88. orq_ai_sdk/orq_completions.py +666 -0
  89. orq_ai_sdk/orq_responses.py +398 -0
  90. orq_ai_sdk/rerank.py +330 -0
  91. orq_ai_sdk/router.py +89 -641
  92. orq_ai_sdk/speech.py +333 -0
  93. orq_ai_sdk/transcriptions.py +416 -0
  94. orq_ai_sdk/translations.py +384 -0
  95. orq_ai_sdk/variations.py +364 -0
  96. orq_ai_sdk-4.2.15.dist-info/METADATA +888 -0
  97. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/RECORD +99 -76
  98. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/WHEEL +1 -1
  99. orq_ai_sdk/models/deletecontactop.py +0 -44
  100. orq_ai_sdk/models/listcontactsop.py +0 -265
  101. orq_ai_sdk/models/retrievecontactop.py +0 -142
  102. orq_ai_sdk/models/updatecontactop.py +0 -233
  103. orq_ai_sdk-4.2.0rc49.dist-info/METADATA +0 -788
  104. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,732 @@
1
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
+
3
+ from __future__ import annotations
4
+ from .publiccontact import PublicContact, PublicContactTypedDict
5
+ from .publicidentity import PublicIdentity, PublicIdentityTypedDict
6
+ from dataclasses import dataclass, field
7
+ import httpx
8
+ import io
9
+ from orq_ai_sdk.models import OrqError
10
+ from orq_ai_sdk.types import BaseModel, Nullable, UNSET_SENTINEL
11
+ from orq_ai_sdk.utils import FieldMetadata, MultipartFormMetadata
12
+ import pydantic
13
+ from pydantic import model_serializer
14
+ from typing import IO, List, Literal, Optional, Union
15
+ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
16
+
17
+
18
# Output format of the transcript.
CreateTranscriptionResponseFormat = Literal["json", "text", "srt", "verbose_json", "vtt"]
r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""


# Timestamp resolution for providers that support per-word/per-character timing.
TimestampsGranularity = Literal["none", "word", "character"]
r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""


TimestampGranularities = Literal["word", "segment"]
40
+
41
+
42
class CreateTranscriptionFallbacksTypedDict(TypedDict):
    model: str
    r"""Fallback model identifier"""


class CreateTranscriptionFallbacks(BaseModel):
    # A single fallback entry: the model to try when the primary model fails.
    model: str
    r"""Fallback model identifier"""
50
+
51
+
52
class CreateTranscriptionRetryTypedDict(TypedDict):
    r"""Retry configuration for the request"""

    count: NotRequired[float]
    r"""Number of retry attempts (1-5)"""
    on_codes: NotRequired[List[float]]
    r"""HTTP status codes that trigger retry logic"""


class CreateTranscriptionRetry(BaseModel):
    r"""Retry configuration for the request"""

    count: Optional[float] = 3
    r"""Number of retry attempts (1-5)"""

    on_codes: Optional[List[float]] = None
    r"""HTTP status codes that trigger retry logic"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit optional fields that are unset or None so they are not
        # emitted as explicit nulls in the serialized payload.
        optional_fields = {"count", "on_codes"}
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            result[key] = value

        return result
85
+
86
+
87
CreateTranscriptionLoadBalancerType = Literal["weight_based",]


class CreateTranscriptionLoadBalancerModelsTypedDict(TypedDict):
    model: str
    r"""Model identifier for load balancing"""
    weight: NotRequired[float]
    r"""Weight assigned to this model for load balancing"""
95
+
96
+
97
class CreateTranscriptionLoadBalancerModels(BaseModel):
    model: str
    r"""Model identifier for load balancing"""

    weight: Optional[float] = 0.5
    r"""Weight assigned to this model for load balancing"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Skip the optional weight when unset/None so defaults are not
        # serialized as explicit nulls.
        optional_fields = {"weight"}
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            result[key] = value

        return result
119
+
120
+
121
class CreateTranscriptionLoadBalancer1TypedDict(TypedDict):
    type: CreateTranscriptionLoadBalancerType
    models: List[CreateTranscriptionLoadBalancerModelsTypedDict]


class CreateTranscriptionLoadBalancer1(BaseModel):
    # Weight-based load balancer: a strategy tag plus the weighted model list.
    type: CreateTranscriptionLoadBalancerType

    models: List[CreateTranscriptionLoadBalancerModels]


CreateTranscriptionLoadBalancerTypedDict = CreateTranscriptionLoadBalancer1TypedDict
r"""Load balancer configuration for the request."""


CreateTranscriptionLoadBalancer = CreateTranscriptionLoadBalancer1
r"""Load balancer configuration for the request."""
138
+
139
+
140
class CreateTranscriptionTimeoutTypedDict(TypedDict):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    call_timeout: float
    r"""Timeout value in milliseconds"""


class CreateTranscriptionTimeout(BaseModel):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    call_timeout: float
    r"""Timeout value in milliseconds"""
152
+
153
+
154
class CreateTranscriptionRouterAudioTranscriptionsFallbacksTypedDict(TypedDict):
    model: str
    r"""Fallback model identifier"""


class CreateTranscriptionRouterAudioTranscriptionsFallbacks(BaseModel):
    # Fallback entry used inside the `orq` router options.
    model: str
    r"""Fallback model identifier"""
162
+
163
+
164
class CreateTranscriptionRouterAudioTranscriptionsRetryTypedDict(TypedDict):
    r"""Retry configuration for the request"""

    count: NotRequired[float]
    r"""Number of retry attempts (1-5)"""
    on_codes: NotRequired[List[float]]
    r"""HTTP status codes that trigger retry logic"""


class CreateTranscriptionRouterAudioTranscriptionsRetry(BaseModel):
    r"""Retry configuration for the request"""

    count: Optional[float] = 3
    r"""Number of retry attempts (1-5)"""

    on_codes: Optional[List[float]] = None
    r"""HTTP status codes that trigger retry logic"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit optional fields that are unset or None so they are not
        # emitted as explicit nulls in the serialized payload.
        optional_fields = {"count", "on_codes"}
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            result[key] = value

        return result
197
+
198
+
199
CreateTranscriptionLoadBalancerRouterAudioTranscriptionsType = Literal["weight_based",]


class CreateTranscriptionLoadBalancerRouterAudioTranscriptionsModelsTypedDict(
    TypedDict
):
    model: str
    r"""Model identifier for load balancing"""
    weight: NotRequired[float]
    r"""Weight assigned to this model for load balancing"""
209
+
210
+
211
class CreateTranscriptionLoadBalancerRouterAudioTranscriptionsModels(BaseModel):
    model: str
    r"""Model identifier for load balancing"""

    weight: Optional[float] = 0.5
    r"""Weight assigned to this model for load balancing"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Skip the optional weight when unset/None so defaults are not
        # serialized as explicit nulls.
        optional_fields = {"weight"}
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            result[key] = value

        return result
233
+
234
+
235
class CreateTranscriptionLoadBalancerRouterAudioTranscriptions1TypedDict(TypedDict):
    type: CreateTranscriptionLoadBalancerRouterAudioTranscriptionsType
    models: List[
        CreateTranscriptionLoadBalancerRouterAudioTranscriptionsModelsTypedDict
    ]


class CreateTranscriptionLoadBalancerRouterAudioTranscriptions1(BaseModel):
    # Weight-based load balancer used inside the `orq` router options.
    type: CreateTranscriptionLoadBalancerRouterAudioTranscriptionsType

    models: List[CreateTranscriptionLoadBalancerRouterAudioTranscriptionsModels]


CreateTranscriptionRouterAudioTranscriptionsLoadBalancerTypedDict = (
    CreateTranscriptionLoadBalancerRouterAudioTranscriptions1TypedDict
)
r"""Array of models with weights for load balancing requests"""


CreateTranscriptionRouterAudioTranscriptionsLoadBalancer = (
    CreateTranscriptionLoadBalancerRouterAudioTranscriptions1
)
r"""Array of models with weights for load balancing requests"""
258
+
259
+
260
class CreateTranscriptionRouterAudioTranscriptionsTimeoutTypedDict(TypedDict):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    call_timeout: float
    r"""Timeout value in milliseconds"""


class CreateTranscriptionRouterAudioTranscriptionsTimeout(BaseModel):
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    call_timeout: float
    r"""Timeout value in milliseconds"""
272
+
273
+
274
class CreateTranscriptionOrqTypedDict(TypedDict):
    # Router-level options (tracing name, fallbacks, retry, identity, etc.).
    name: NotRequired[str]
    r"""The name to display on the trace. If not specified, the default system name will be used."""
    fallbacks: NotRequired[
        List[CreateTranscriptionRouterAudioTranscriptionsFallbacksTypedDict]
    ]
    r"""Array of fallback models to use if primary model fails"""
    retry: NotRequired[CreateTranscriptionRouterAudioTranscriptionsRetryTypedDict]
    r"""Retry configuration for the request"""
    identity: NotRequired[PublicIdentityTypedDict]
    r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""
    contact: NotRequired[PublicContactTypedDict]
    r"""@deprecated Use identity instead. Information about the contact making the request."""
    load_balancer: NotRequired[
        CreateTranscriptionRouterAudioTranscriptionsLoadBalancerTypedDict
    ]
    r"""Array of models with weights for load balancing requests"""
    timeout: NotRequired[CreateTranscriptionRouterAudioTranscriptionsTimeoutTypedDict]
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
293
+
294
+
295
class CreateTranscriptionOrq(BaseModel):
    # Router-level options (tracing name, fallbacks, retry, identity, etc.).
    name: Optional[str] = None
    r"""The name to display on the trace. If not specified, the default system name will be used."""

    fallbacks: Optional[List[CreateTranscriptionRouterAudioTranscriptionsFallbacks]] = (
        None
    )
    r"""Array of fallback models to use if primary model fails"""

    retry: Optional[CreateTranscriptionRouterAudioTranscriptionsRetry] = None
    r"""Retry configuration for the request"""

    identity: Optional[PublicIdentity] = None
    r"""Information about the identity making the request. If the identity does not exist, it will be created automatically."""

    contact: Annotated[
        Optional[PublicContact],
        pydantic.Field(
            deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
        ),
    ] = None
    r"""@deprecated Use identity instead. Information about the contact making the request."""

    load_balancer: Optional[
        CreateTranscriptionRouterAudioTranscriptionsLoadBalancer
    ] = None
    r"""Array of models with weights for load balancing requests"""

    timeout: Optional[CreateTranscriptionRouterAudioTranscriptionsTimeout] = None
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Every field is optional here; drop any that are unset or None so
        # the payload only carries values the caller actually provided.
        optional_fields = {
            "name",
            "fallbacks",
            "retry",
            "identity",
            "contact",
            "load_balancer",
            "timeout",
        }
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            result[key] = value

        return result
351
+
352
+
353
class CreateTranscriptionFileTypedDict(TypedDict):
    # Multipart file payload: name, raw content, and optional MIME type.
    file_name: str
    content: Union[bytes, IO[bytes], io.BufferedReader]
    content_type: NotRequired[str]
357
+
358
+
359
class CreateTranscriptionFile(BaseModel):
    r"""Multipart file payload: file name, raw content, and optional Content-Type."""

    file_name: Annotated[
        str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True)
    ]

    content: Annotated[
        Union[bytes, IO[bytes], io.BufferedReader],
        pydantic.Field(alias=""),
        FieldMetadata(multipart=MultipartFormMetadata(content=True)),
    ]

    content_type: Annotated[
        Optional[str],
        pydantic.Field(alias="Content-Type"),
        FieldMetadata(multipart=True),
    ] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Fix: the loop below keys on the field *alias* ("Content-Type"), so the
        # previous value set(["contentType"]) matched neither the alias nor the
        # python name (content_type) and an unset content type was serialized as
        # an explicit null instead of being omitted.
        optional_fields = set(["Content-Type"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
391
+
392
+
393
class CreateTranscriptionRequestBodyTypedDict(TypedDict):
    r"""Transcribes audio into the input language."""

    model: str
    r"""ID of the model to use"""
    prompt: NotRequired[str]
    r"""An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language."""
    enable_logging: NotRequired[bool]
    r"""When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers."""
    diarize: NotRequired[bool]
    r"""Whether to annotate which speaker is currently talking in the uploaded file."""
    response_format: NotRequired[CreateTranscriptionResponseFormat]
    r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""
    tag_audio_events: NotRequired[bool]
    r"""Whether to tag audio events like (laughter), (footsteps), etc. in the transcription."""
    num_speakers: NotRequired[float]
    r"""The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32."""
    timestamps_granularity: NotRequired[TimestampsGranularity]
    r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""
    temperature: NotRequired[float]
    r"""The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit."""
    language: NotRequired[str]
    r"""The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency."""
    timestamp_granularities: NotRequired[List[TimestampGranularities]]
    r"""The timestamp granularities to populate for this transcription. response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: \"word\" or \"segment\". Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency."""
    name: NotRequired[str]
    r"""The name to display on the trace. If not specified, the default system name will be used."""
    fallbacks: NotRequired[List[CreateTranscriptionFallbacksTypedDict]]
    r"""Array of fallback models to use if primary model fails"""
    retry: NotRequired[CreateTranscriptionRetryTypedDict]
    r"""Retry configuration for the request"""
    load_balancer: NotRequired[CreateTranscriptionLoadBalancerTypedDict]
    r"""Load balancer configuration for the request."""
    timeout: NotRequired[CreateTranscriptionTimeoutTypedDict]
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""
    orq: NotRequired[CreateTranscriptionOrqTypedDict]
    file: NotRequired[CreateTranscriptionFileTypedDict]
    r"""The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm."""
431
+
432
+
433
class CreateTranscriptionRequestBody(BaseModel):
    r"""Transcribes audio into the input language."""

    model: Annotated[str, FieldMetadata(multipart=True)]
    r"""ID of the model to use"""

    prompt: Annotated[Optional[str], FieldMetadata(multipart=True)] = None
    r"""An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language."""

    enable_logging: Annotated[Optional[bool], FieldMetadata(multipart=True)] = True
    r"""When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers."""

    diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False
    r"""Whether to annotate which speaker is currently talking in the uploaded file."""

    response_format: Annotated[
        Optional[CreateTranscriptionResponseFormat], FieldMetadata(multipart=True)
    ] = None
    r"""The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt."""

    tag_audio_events: Annotated[Optional[bool], FieldMetadata(multipart=True)] = True
    r"""Whether to tag audio events like (laughter), (footsteps), etc. in the transcription."""

    num_speakers: Annotated[Optional[float], FieldMetadata(multipart=True)] = None
    r"""The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32."""

    timestamps_granularity: Annotated[
        Optional[TimestampsGranularity], FieldMetadata(multipart=True)
    ] = "word"
    r"""The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word."""

    temperature: Annotated[Optional[float], FieldMetadata(multipart=True)] = None
    r"""The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit."""

    language: Annotated[Optional[str], FieldMetadata(multipart=True)] = None
    r"""The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency."""

    timestamp_granularities: Annotated[
        Optional[List[TimestampGranularities]], FieldMetadata(multipart=True)
    ] = None
    r"""The timestamp granularities to populate for this transcription. response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: \"word\" or \"segment\". Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency."""

    name: Annotated[Optional[str], FieldMetadata(multipart=True)] = None
    r"""The name to display on the trace. If not specified, the default system name will be used."""

    fallbacks: Annotated[
        Optional[List[CreateTranscriptionFallbacks]],
        FieldMetadata(multipart=MultipartFormMetadata(json=True)),
    ] = None
    r"""Array of fallback models to use if primary model fails"""

    retry: Annotated[
        Optional[CreateTranscriptionRetry],
        FieldMetadata(multipart=MultipartFormMetadata(json=True)),
    ] = None
    r"""Retry configuration for the request"""

    load_balancer: Annotated[
        Optional[CreateTranscriptionLoadBalancer],
        FieldMetadata(multipart=MultipartFormMetadata(json=True)),
    ] = None
    r"""Load balancer configuration for the request."""

    timeout: Annotated[
        Optional[CreateTranscriptionTimeout],
        FieldMetadata(multipart=MultipartFormMetadata(json=True)),
    ] = None
    r"""Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured."""

    orq: Annotated[
        Optional[CreateTranscriptionOrq],
        FieldMetadata(multipart=MultipartFormMetadata(json=True)),
    ] = None

    file: Annotated[
        Optional[CreateTranscriptionFile],
        FieldMetadata(multipart=MultipartFormMetadata(file=True)),
    ] = None
    r"""The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Everything except `model` is optional; drop unset/None optional
        # fields so they never appear as explicit nulls in the form payload.
        optional_fields = {
            "prompt",
            "enable_logging",
            "diarize",
            "response_format",
            "tag_audio_events",
            "num_speakers",
            "timestamps_granularity",
            "temperature",
            "language",
            "timestamp_granularities",
            "name",
            "fallbacks",
            "retry",
            "load_balancer",
            "timeout",
            "orq",
            "file",
        }
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            result[key] = value

        return result
548
+
549
+
550
class CreateTranscriptionErrorTypedDict(TypedDict):
    message: str
    type: str
    param: Nullable[str]
    code: str


class CreateTranscriptionError(BaseModel):
    # Provider error payload: all fields required; `param` may be null.
    message: str

    type: str

    param: Nullable[str]

    code: str

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # No optional fields here — emit every field (param may be null),
        # skipping only unset sentinels.
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            result[key] = value

        return result
579
+
580
+
581
class CreateTranscriptionRouterAudioTranscriptionsResponseBodyData(BaseModel):
    error: CreateTranscriptionError


@dataclass(unsafe_hash=True)
class CreateTranscriptionRouterAudioTranscriptionsResponseBody(OrqError):
    r"""Returns validation error"""

    # Excluded from hashing; assigned via object.__setattr__ in __init__.
    data: CreateTranscriptionRouterAudioTranscriptionsResponseBodyData = field(
        hash=False
    )

    def __init__(
        self,
        data: CreateTranscriptionRouterAudioTranscriptionsResponseBodyData,
        raw_response: httpx.Response,
        body: Optional[str] = None,
    ):
        # Prefer the structured error message; fall back to the raw body
        # (or response text) when the message is empty.
        default_message = body or raw_response.text
        message = str(data.error.message) or default_message
        super().__init__(message, raw_response, body)
        object.__setattr__(self, "data", data)
603
+
604
+
605
class WordsTypedDict(TypedDict):
    word: NotRequired[str]
    start: NotRequired[float]
    end: NotRequired[float]


class Words(BaseModel):
    # A single word with its start/end offsets in the audio.
    word: Optional[str] = None

    start: Optional[float] = None

    end: Optional[float] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # All fields optional: omit unset/None values from the output.
        optional_fields = {"word", "start", "end"}
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            result[key] = value

        return result
633
+
634
+
635
class SegmentsTypedDict(TypedDict):
    id: float
    seek: float
    start: float
    end: float
    text: str
    tokens: List[float]
    temperature: float
    avg_logprob: float
    compression_ratio: float
    no_speech_prob: float


class Segments(BaseModel):
    # One verbose-transcription segment; every field is required.
    id: float

    seek: float

    start: float

    end: float

    text: str

    tokens: List[float]

    temperature: float

    avg_logprob: float

    compression_ratio: float

    no_speech_prob: float
668
+
669
+
670
class CreateTranscriptionResponseBody2TypedDict(TypedDict):
    text: str
    task: NotRequired[str]
    language: NotRequired[str]
    duration: NotRequired[float]
    words: NotRequired[List[WordsTypedDict]]
    segments: NotRequired[List[SegmentsTypedDict]]


class CreateTranscriptionResponseBody2(BaseModel):
    # Verbose transcription: required text plus optional timing metadata.
    text: str

    task: Optional[str] = None

    language: Optional[str] = None

    duration: Optional[float] = None

    words: Optional[List[Words]] = None

    segments: Optional[List[Segments]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit unset/None optional metadata; `text` is always emitted.
        optional_fields = {"task", "language", "duration", "words", "segments"}
        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.get(key)

            if value == UNSET_SENTINEL:
                continue
            if value is None and key in optional_fields:
                continue
            result[key] = value

        return result
707
+
708
+
709
class CreateTranscriptionResponseBody1TypedDict(TypedDict):
    text: str


class CreateTranscriptionResponseBody1(BaseModel):
    # Plain transcription result: just the transcribed text.
    text: str
715
+
716
+
717
CreateTranscriptionResponseBodyTypedDict = TypeAliasType(
    "CreateTranscriptionResponseBodyTypedDict",
    Union[
        CreateTranscriptionResponseBody1TypedDict,
        CreateTranscriptionResponseBody2TypedDict,
        str,
    ],
)
r"""Returns the transcription or verbose transcription"""


CreateTranscriptionResponseBody = TypeAliasType(
    "CreateTranscriptionResponseBody",
    Union[
        CreateTranscriptionResponseBody1,
        CreateTranscriptionResponseBody2,
        str,
    ],
)
r"""Returns the transcription or verbose transcription"""