orq-ai-sdk 4.2.0rc48__py3-none-any.whl → 4.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/chat.py +22 -0
  5. orq_ai_sdk/completions.py +438 -0
  6. orq_ai_sdk/contacts.py +43 -886
  7. orq_ai_sdk/deployments.py +61 -0
  8. orq_ai_sdk/edits.py +364 -0
  9. orq_ai_sdk/embeddings.py +344 -0
  10. orq_ai_sdk/generations.py +370 -0
  11. orq_ai_sdk/images.py +28 -0
  12. orq_ai_sdk/models/__init__.py +3839 -424
  13. orq_ai_sdk/models/conversationresponse.py +1 -1
  14. orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
  15. orq_ai_sdk/models/createagentrequestop.py +768 -12
  16. orq_ai_sdk/models/createagentresponse.py +68 -2
  17. orq_ai_sdk/models/createchatcompletionop.py +538 -313
  18. orq_ai_sdk/models/createcompletionop.py +2078 -0
  19. orq_ai_sdk/models/createcontactop.py +5 -10
  20. orq_ai_sdk/models/createconversationop.py +1 -1
  21. orq_ai_sdk/models/createconversationresponseop.py +2 -2
  22. orq_ai_sdk/models/createdatasetitemop.py +4 -4
  23. orq_ai_sdk/models/createdatasetop.py +1 -1
  24. orq_ai_sdk/models/createdatasourceop.py +1 -1
  25. orq_ai_sdk/models/createembeddingop.py +579 -0
  26. orq_ai_sdk/models/createevalop.py +14 -14
  27. orq_ai_sdk/models/createidentityop.py +1 -1
  28. orq_ai_sdk/models/createimageeditop.py +715 -0
  29. orq_ai_sdk/models/createimageop.py +228 -82
  30. orq_ai_sdk/models/createimagevariationop.py +706 -0
  31. orq_ai_sdk/models/creatememoryop.py +4 -2
  32. orq_ai_sdk/models/createmoderationop.py +521 -0
  33. orq_ai_sdk/models/createpromptop.py +375 -6
  34. orq_ai_sdk/models/creatererankop.py +608 -0
  35. orq_ai_sdk/models/createresponseop.py +2567 -0
  36. orq_ai_sdk/models/createspeechop.py +466 -0
  37. orq_ai_sdk/models/createtoolop.py +6 -6
  38. orq_ai_sdk/models/createtranscriptionop.py +732 -0
  39. orq_ai_sdk/models/createtranslationop.py +702 -0
  40. orq_ai_sdk/models/deploymentgetconfigop.py +17 -7
  41. orq_ai_sdk/models/deploymentsop.py +1 -0
  42. orq_ai_sdk/models/deploymentstreamop.py +7 -0
  43. orq_ai_sdk/models/filegetop.py +1 -1
  44. orq_ai_sdk/models/filelistop.py +1 -1
  45. orq_ai_sdk/models/fileuploadop.py +1 -1
  46. orq_ai_sdk/models/generateconversationnameop.py +1 -1
  47. orq_ai_sdk/models/getallmemoriesop.py +4 -2
  48. orq_ai_sdk/models/getallpromptsop.py +188 -3
  49. orq_ai_sdk/models/getalltoolsop.py +6 -6
  50. orq_ai_sdk/models/getevalsop.py +17 -17
  51. orq_ai_sdk/models/getonepromptop.py +188 -3
  52. orq_ai_sdk/models/getpromptversionop.py +188 -3
  53. orq_ai_sdk/models/invokedeploymentrequest.py +11 -4
  54. orq_ai_sdk/models/listagentsop.py +372 -0
  55. orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
  56. orq_ai_sdk/models/listdatasetsop.py +1 -1
  57. orq_ai_sdk/models/listdatasourcesop.py +1 -1
  58. orq_ai_sdk/models/listidentitiesop.py +1 -1
  59. orq_ai_sdk/models/listmodelsop.py +1 -0
  60. orq_ai_sdk/models/listpromptversionsop.py +188 -3
  61. orq_ai_sdk/models/partdoneevent.py +1 -1
  62. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  63. orq_ai_sdk/models/publiccontact.py +9 -3
  64. orq_ai_sdk/models/publicidentity.py +62 -0
  65. orq_ai_sdk/models/reasoningpart.py +1 -1
  66. orq_ai_sdk/models/responsedoneevent.py +14 -11
  67. orq_ai_sdk/models/retrieveagentrequestop.py +382 -0
  68. orq_ai_sdk/models/retrievedatapointop.py +4 -4
  69. orq_ai_sdk/models/retrievedatasetop.py +1 -1
  70. orq_ai_sdk/models/retrievedatasourceop.py +1 -1
  71. orq_ai_sdk/models/retrieveidentityop.py +1 -1
  72. orq_ai_sdk/models/retrievememoryop.py +4 -2
  73. orq_ai_sdk/models/retrievetoolop.py +6 -6
  74. orq_ai_sdk/models/runagentop.py +379 -9
  75. orq_ai_sdk/models/streamrunagentop.py +385 -9
  76. orq_ai_sdk/models/updateagentop.py +770 -12
  77. orq_ai_sdk/models/updateconversationop.py +1 -1
  78. orq_ai_sdk/models/updatedatapointop.py +4 -4
  79. orq_ai_sdk/models/updatedatasetop.py +1 -1
  80. orq_ai_sdk/models/updatedatasourceop.py +1 -1
  81. orq_ai_sdk/models/updateevalop.py +14 -14
  82. orq_ai_sdk/models/updateidentityop.py +1 -1
  83. orq_ai_sdk/models/updatememoryop.py +4 -2
  84. orq_ai_sdk/models/updatepromptop.py +375 -6
  85. orq_ai_sdk/models/updatetoolop.py +7 -7
  86. orq_ai_sdk/moderations.py +218 -0
  87. orq_ai_sdk/orq_completions.py +666 -0
  88. orq_ai_sdk/orq_responses.py +398 -0
  89. orq_ai_sdk/rerank.py +330 -0
  90. orq_ai_sdk/router.py +89 -641
  91. orq_ai_sdk/speech.py +333 -0
  92. orq_ai_sdk/transcriptions.py +416 -0
  93. orq_ai_sdk/translations.py +384 -0
  94. orq_ai_sdk/variations.py +364 -0
  95. orq_ai_sdk-4.2.12.dist-info/METADATA +888 -0
  96. {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/RECORD +98 -75
  97. {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/WHEEL +1 -1
  98. orq_ai_sdk/models/deletecontactop.py +0 -44
  99. orq_ai_sdk/models/listcontactsop.py +0 -265
  100. orq_ai_sdk/models/retrievecontactop.py +0 -142
  101. orq_ai_sdk/models/updatecontactop.py +0 -233
  102. orq_ai_sdk-4.2.0rc48.dist-info/METADATA +0 -788
  103. {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,416 @@
1
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
+
3
+ from .basesdk import BaseSDK
4
+ from orq_ai_sdk import models, utils
5
+ from orq_ai_sdk._hooks import HookContext
6
+ from orq_ai_sdk.models import createtranscriptionop as models_createtranscriptionop
7
+ from orq_ai_sdk.types import OptionalNullable, UNSET
8
+ from orq_ai_sdk.utils import get_security_from_env
9
+ from orq_ai_sdk.utils.unmarshal_json_response import unmarshal_json_response
10
+ from typing import Any, List, Mapping, Optional, Union
11
+
12
+
13
class Transcriptions(BaseSDK):
    r"""Audio transcription resource for the orq router.

    Generated SDK surface for ``POST /v2/router/audio/transcriptions``,
    exposed as a synchronous ``create`` and an asynchronous ``create_async``
    method with identical parameters and behavior.
    """

    def create(
        self,
        *,
        model: str,
        prompt: Optional[str] = None,
        enable_logging: Optional[bool] = True,
        diarize: Optional[bool] = False,
        response_format: Optional[
            models_createtranscriptionop.CreateTranscriptionResponseFormat
        ] = None,
        tag_audio_events: Optional[bool] = True,
        num_speakers: Optional[float] = None,
        timestamps_granularity: Optional[
            models_createtranscriptionop.TimestampsGranularity
        ] = "word",
        temperature: Optional[float] = None,
        language: Optional[str] = None,
        timestamp_granularities: Optional[
            List[models_createtranscriptionop.TimestampGranularities]
        ] = None,
        name: Optional[str] = None,
        fallbacks: Optional[
            Union[
                List[models_createtranscriptionop.CreateTranscriptionFallbacks],
                List[
                    models_createtranscriptionop.CreateTranscriptionFallbacksTypedDict
                ],
            ]
        ] = None,
        retry: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionRetry,
                models_createtranscriptionop.CreateTranscriptionRetryTypedDict,
            ]
        ] = None,
        load_balancer: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionLoadBalancer,
                models_createtranscriptionop.CreateTranscriptionLoadBalancerTypedDict,
            ]
        ] = None,
        timeout: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionTimeout,
                models_createtranscriptionop.CreateTranscriptionTimeoutTypedDict,
            ]
        ] = None,
        orq: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionOrq,
                models_createtranscriptionop.CreateTranscriptionOrqTypedDict,
            ]
        ] = None,
        file: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionFile,
                models_createtranscriptionop.CreateTranscriptionFileTypedDict,
            ]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.CreateTranscriptionResponseBody:
        r"""Create transcription

        :param model: ID of the model to use
        :param prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
        :param enable_logging: When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers.
        :param diarize: Whether to annotate which speaker is currently talking in the uploaded file.
        :param response_format: The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
        :param tag_audio_events: Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
        :param num_speakers: The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32.
        :param timestamps_granularity: The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word.
        :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
        :param language: The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
        :param timestamp_granularities: The timestamp granularities to populate for this transcription. response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: \"word\" or \"segment\". Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
        :param name: The name to display on the trace. If not specified, the default system name will be used.
        :param fallbacks: Array of fallback models to use if primary model fails
        :param retry: Retry configuration for the request
        :param load_balancer: Load balancer configuration for the request.
        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
        :param orq:
        :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Resolve the request timeout: explicit argument wins, then the
        # SDK-wide configuration, then a 600000 ms (10 minute) default.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if timeout_ms is None:
            timeout_ms = 600000

        # A per-call server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Coerce any TypedDict-style arguments into their pydantic model
        # equivalents so the request body serializes uniformly.
        request = models.CreateTranscriptionRequestBody(
            model=model,
            prompt=prompt,
            enable_logging=enable_logging,
            diarize=diarize,
            response_format=response_format,
            tag_audio_events=tag_audio_events,
            num_speakers=num_speakers,
            timestamps_granularity=timestamps_granularity,
            temperature=temperature,
            language=language,
            timestamp_granularities=timestamp_granularities,
            name=name,
            fallbacks=utils.get_pydantic_model(
                fallbacks, Optional[List[models.CreateTranscriptionFallbacks]]
            ),
            retry=utils.get_pydantic_model(
                retry, Optional[models.CreateTranscriptionRetry]
            ),
            load_balancer=utils.get_pydantic_model(
                load_balancer, Optional[models.CreateTranscriptionLoadBalancer]
            ),
            timeout=utils.get_pydantic_model(
                timeout, Optional[models.CreateTranscriptionTimeout]
            ),
            orq=utils.get_pydantic_model(orq, Optional[models.CreateTranscriptionOrq]),
            file=utils.get_pydantic_model(
                file, Optional[models.CreateTranscriptionFile]
            ),
        )

        # Body is serialized as multipart form data (the request may carry a
        # binary audio file).
        req = self._build_request(
            method="POST",
            path="/v2/router/audio/transcriptions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request,
                False,
                False,
                "multipart",
                models.CreateTranscriptionRequestBody,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry config takes precedence; otherwise fall back to the
        # SDK-wide retry configuration when one is set.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="createTranscription",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed response body; 422 -> structured validation error
        # raised as a typed exception; other 4XX/5XX -> generic APIError
        # carrying the raw response text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.CreateTranscriptionResponseBody, http_res
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                models.CreateTranscriptionRouterAudioTranscriptionsResponseBodyData,
                http_res,
            )
            raise models.CreateTranscriptionRouterAudioTranscriptionsResponseBody(
                response_data, http_res
            )
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)

        raise models.APIError("Unexpected response received", http_res)

    async def create_async(
        self,
        *,
        model: str,
        prompt: Optional[str] = None,
        enable_logging: Optional[bool] = True,
        diarize: Optional[bool] = False,
        response_format: Optional[
            models_createtranscriptionop.CreateTranscriptionResponseFormat
        ] = None,
        tag_audio_events: Optional[bool] = True,
        num_speakers: Optional[float] = None,
        timestamps_granularity: Optional[
            models_createtranscriptionop.TimestampsGranularity
        ] = "word",
        temperature: Optional[float] = None,
        language: Optional[str] = None,
        timestamp_granularities: Optional[
            List[models_createtranscriptionop.TimestampGranularities]
        ] = None,
        name: Optional[str] = None,
        fallbacks: Optional[
            Union[
                List[models_createtranscriptionop.CreateTranscriptionFallbacks],
                List[
                    models_createtranscriptionop.CreateTranscriptionFallbacksTypedDict
                ],
            ]
        ] = None,
        retry: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionRetry,
                models_createtranscriptionop.CreateTranscriptionRetryTypedDict,
            ]
        ] = None,
        load_balancer: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionLoadBalancer,
                models_createtranscriptionop.CreateTranscriptionLoadBalancerTypedDict,
            ]
        ] = None,
        timeout: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionTimeout,
                models_createtranscriptionop.CreateTranscriptionTimeoutTypedDict,
            ]
        ] = None,
        orq: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionOrq,
                models_createtranscriptionop.CreateTranscriptionOrqTypedDict,
            ]
        ] = None,
        file: Optional[
            Union[
                models_createtranscriptionop.CreateTranscriptionFile,
                models_createtranscriptionop.CreateTranscriptionFileTypedDict,
            ]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.CreateTranscriptionResponseBody:
        r"""Create transcription

        :param model: ID of the model to use
        :param prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
        :param enable_logging: When enable_logging is set to false, zero retention mode is used. This disables history features like request stitching and is only available to enterprise customers.
        :param diarize: Whether to annotate which speaker is currently talking in the uploaded file.
        :param response_format: The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
        :param tag_audio_events: Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
        :param num_speakers: The maximum amount of speakers talking in the uploaded file. Helps with predicting who speaks when, the maximum is 32.
        :param timestamps_granularity: The granularity of the timestamps in the transcription. Word provides word-level timestamps and character provides character-level timestamps per word.
        :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
        :param language: The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
        :param timestamp_granularities: The timestamp granularities to populate for this transcription. response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: \"word\" or \"segment\". Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
        :param name: The name to display on the trace. If not specified, the default system name will be used.
        :param fallbacks: Array of fallback models to use if primary model fails
        :param retry: Retry configuration for the request
        :param load_balancer: Load balancer configuration for the request.
        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
        :param orq:
        :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async mirror of create(): identical request construction, with
        # awaited request/response I/O.
        base_url = None
        url_variables = None
        # Resolve the request timeout: explicit argument wins, then the
        # SDK-wide configuration, then a 600000 ms (10 minute) default.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if timeout_ms is None:
            timeout_ms = 600000

        # A per-call server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Coerce any TypedDict-style arguments into their pydantic model
        # equivalents so the request body serializes uniformly.
        request = models.CreateTranscriptionRequestBody(
            model=model,
            prompt=prompt,
            enable_logging=enable_logging,
            diarize=diarize,
            response_format=response_format,
            tag_audio_events=tag_audio_events,
            num_speakers=num_speakers,
            timestamps_granularity=timestamps_granularity,
            temperature=temperature,
            language=language,
            timestamp_granularities=timestamp_granularities,
            name=name,
            fallbacks=utils.get_pydantic_model(
                fallbacks, Optional[List[models.CreateTranscriptionFallbacks]]
            ),
            retry=utils.get_pydantic_model(
                retry, Optional[models.CreateTranscriptionRetry]
            ),
            load_balancer=utils.get_pydantic_model(
                load_balancer, Optional[models.CreateTranscriptionLoadBalancer]
            ),
            timeout=utils.get_pydantic_model(
                timeout, Optional[models.CreateTranscriptionTimeout]
            ),
            orq=utils.get_pydantic_model(orq, Optional[models.CreateTranscriptionOrq]),
            file=utils.get_pydantic_model(
                file, Optional[models.CreateTranscriptionFile]
            ),
        )

        # Body is serialized as multipart form data (the request may carry a
        # binary audio file).
        req = self._build_request_async(
            method="POST",
            path="/v2/router/audio/transcriptions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request,
                False,
                False,
                "multipart",
                models.CreateTranscriptionRequestBody,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry config takes precedence; otherwise fall back to the
        # SDK-wide retry configuration when one is set.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="createTranscription",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> typed response body; 422 -> structured validation error
        # raised as a typed exception; other 4XX/5XX -> generic APIError
        # carrying the raw response text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.CreateTranscriptionResponseBody, http_res
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                models.CreateTranscriptionRouterAudioTranscriptionsResponseBodyData,
                http_res,
            )
            raise models.CreateTranscriptionRouterAudioTranscriptionsResponseBody(
                response_data, http_res
            )
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.APIError("API error occurred", http_res, http_res_text)

        raise models.APIError("Unexpected response received", http_res)