azure-ai-transcription 1.0.0b2__tar.gz → 1.0.0b3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. azure_ai_transcription-1.0.0b3/CHANGELOG.md +23 -0
  2. {azure_ai_transcription-1.0.0b2/azure_ai_transcription.egg-info → azure_ai_transcription-1.0.0b3}/PKG-INFO +56 -31
  3. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/README.md +44 -29
  4. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_client.py +1 -1
  5. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_configuration.py +1 -1
  6. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_operations/_operations.py +1 -2
  7. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_operations/_patch.py +11 -2
  8. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_utils/model_base.py +112 -12
  9. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_utils/serialization.py +14 -3
  10. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_utils/utils.py +5 -4
  11. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_version.py +1 -1
  12. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/aio/_client.py +1 -1
  13. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/aio/_configuration.py +1 -1
  14. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/aio/_operations/_operations.py +1 -2
  15. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/aio/_operations/_patch.py +11 -2
  16. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/models/_enums.py +2 -2
  17. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/models/_models.py +2 -2
  18. azure_ai_transcription-1.0.0b3/azure/ai/transcription/models/_patch.py +66 -0
  19. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3/azure_ai_transcription.egg-info}/PKG-INFO +56 -31
  20. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure_ai_transcription.egg-info/requires.txt +1 -1
  21. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/pyproject.toml +1 -1
  22. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/async_samples/sample_transcribe_audio_file_async.py +10 -2
  23. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/async_samples/sample_transcribe_from_url_async.py +10 -2
  24. azure_ai_transcription-1.0.0b3/samples/async_samples/sample_transcribe_multiple_languages_async.py +94 -0
  25. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/async_samples/sample_transcribe_with_diarization_async.py +10 -2
  26. azure_ai_transcription-1.0.0b3/samples/async_samples/sample_transcribe_with_enhanced_mode_async.py +308 -0
  27. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/async_samples/sample_transcribe_with_phrase_list_async.py +28 -26
  28. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/async_samples/sample_transcribe_with_profanity_filter_async.py +10 -2
  29. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/sample_transcribe_audio_file.py +10 -2
  30. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/sample_transcribe_from_url.py +10 -2
  31. azure_ai_transcription-1.0.0b3/samples/sample_transcribe_multiple_languages.py +94 -0
  32. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/sample_transcribe_with_diarization.py +10 -2
  33. azure_ai_transcription-1.0.0b3/samples/sample_transcribe_with_enhanced_mode.py +307 -0
  34. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/sample_transcribe_with_phrase_list.py +28 -18
  35. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/sample_transcribe_with_profanity_filter.py +10 -2
  36. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/conftest.py +18 -36
  37. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/preparer.py +9 -15
  38. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_basic.py +15 -18
  39. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_basic_async.py +18 -21
  40. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_client_management.py +4 -4
  41. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_client_management_async.py +2 -2
  42. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_diarization.py +5 -7
  43. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_diarization_async.py +8 -6
  44. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_enhanced_mode.py +6 -7
  45. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_file.py +6 -9
  46. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_file_async.py +6 -9
  47. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_options.py +22 -40
  48. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_options_async.py +16 -24
  49. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_url.py +2 -2
  50. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/tests/test_transcription_url_async.py +3 -3
  51. azure_ai_transcription-1.0.0b2/CHANGELOG.md +0 -13
  52. azure_ai_transcription-1.0.0b2/azure/ai/transcription/models/_patch.py +0 -21
  53. azure_ai_transcription-1.0.0b2/samples/async_samples/sample_transcribe_multiple_languages_async.py +0 -69
  54. azure_ai_transcription-1.0.0b2/samples/async_samples/sample_transcribe_with_enhanced_mode_async.py +0 -84
  55. azure_ai_transcription-1.0.0b2/samples/sample_transcribe_multiple_languages.py +0 -69
  56. azure_ai_transcription-1.0.0b2/samples/sample_transcribe_with_enhanced_mode.py +0 -84
  57. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/LICENSE +0 -0
  58. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/MANIFEST.in +0 -0
  59. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/__init__.py +0 -0
  60. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/__init__.py +0 -0
  61. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/__init__.py +0 -0
  62. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_operations/__init__.py +0 -0
  63. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_patch.py +0 -0
  64. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/_utils/__init__.py +0 -0
  65. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/aio/__init__.py +0 -0
  66. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/aio/_operations/__init__.py +0 -0
  67. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/aio/_patch.py +0 -0
  68. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/models/__init__.py +0 -0
  69. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure/ai/transcription/py.typed +0 -0
  70. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure_ai_transcription.egg-info/SOURCES.txt +0 -0
  71. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure_ai_transcription.egg-info/dependency_links.txt +0 -0
  72. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/azure_ai_transcription.egg-info/top_level.txt +0 -0
  73. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/samples/README.md +0 -0
  74. {azure_ai_transcription-1.0.0b2 → azure_ai_transcription-1.0.0b3}/setup.cfg +0 -0
@@ -0,0 +1,23 @@
1
+ # Release History
2
+
3
+ ## 1.0.0b3 (2026-02-04)
4
+
5
+ ### Features Added
6
+
7
+ - Enhanced Mode now automatically sets `enabled=True` when `task`, `target_language`, or `prompt` are specified
8
+
9
+ ### Bugs Fixed
10
+
11
+ - Fixed Enhanced Mode not being activated when using `EnhancedModeProperties` without explicitly setting `enabled=True`
12
+
13
+ ## 1.0.0b2 (2025-12-19)
14
+
15
+ ### Bugs Fixed
16
+
17
+ - Fixed API reference link
18
+
19
+ ## 1.0.0b1 (2025-12-03)
20
+
21
+ ### Other Changes
22
+
23
+ - Initial version
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: azure-ai-transcription
3
- Version: 1.0.0b2
3
+ Version: 1.0.0b3
4
4
  Summary: Microsoft Corporation Azure AI Transcription Client Library for Python
5
5
  Author-email: Microsoft Corporation <azpysdkhelp@microsoft.com>
6
6
  License-Expression: MIT
@@ -19,7 +19,7 @@ Requires-Python: >=3.9
19
19
  Description-Content-Type: text/markdown
20
20
  License-File: LICENSE
21
21
  Requires-Dist: isodate>=0.6.1
22
- Requires-Dist: azure-core>=1.35.0
22
+ Requires-Dist: azure-core>=1.37.0
23
23
  Requires-Dist: typing-extensions>=4.6.0
24
24
  Dynamic: license-file
25
25
 
@@ -174,10 +174,18 @@ from azure.ai.transcription.models import TranscriptionContent, TranscriptionOpt
174
174
 
175
175
  # Get configuration from environment variables
176
176
  endpoint = os.environ["AZURE_SPEECH_ENDPOINT"]
177
- api_key = os.environ["AZURE_SPEECH_API_KEY"]
177
+
178
+ # We recommend using role-based access control (RBAC) for production scenarios
179
+ api_key = os.environ.get("AZURE_SPEECH_API_KEY")
180
+ if api_key:
181
+ credential = AzureKeyCredential(api_key)
182
+ else:
183
+ from azure.identity import DefaultAzureCredential
184
+
185
+ credential = DefaultAzureCredential()
178
186
 
179
187
  # Create the transcription client
180
- client = TranscriptionClient(endpoint=endpoint, credential=AzureKeyCredential(api_key))
188
+ client = TranscriptionClient(endpoint=endpoint, credential=credential)
181
189
 
182
190
  # Path to your audio file
183
191
  import pathlib
@@ -222,10 +230,18 @@ from azure.ai.transcription.models import TranscriptionOptions
222
230
 
223
231
  # Get configuration from environment variables
224
232
  endpoint = os.environ["AZURE_SPEECH_ENDPOINT"]
225
- api_key = os.environ["AZURE_SPEECH_API_KEY"]
233
+
234
+ # We recommend using role-based access control (RBAC) for production scenarios
235
+ api_key = os.environ.get("AZURE_SPEECH_API_KEY")
236
+ if api_key:
237
+ credential = AzureKeyCredential(api_key)
238
+ else:
239
+ from azure.identity import DefaultAzureCredential
240
+
241
+ credential = DefaultAzureCredential()
226
242
 
227
243
  # Create the transcription client
228
- client = TranscriptionClient(endpoint=endpoint, credential=AzureKeyCredential(api_key))
244
+ client = TranscriptionClient(endpoint=endpoint, credential=credential)
229
245
 
230
246
  # URL to your audio file (must be publicly accessible)
231
247
  audio_url = "https://example.com/path/to/audio.wav"
@@ -263,31 +279,29 @@ from azure.ai.transcription.models import (
263
279
 
264
280
  # Get configuration from environment variables
265
281
  endpoint = os.environ["AZURE_SPEECH_ENDPOINT"]
266
- api_key = os.environ["AZURE_SPEECH_API_KEY"]
282
+
283
+ # We recommend using role-based access control (RBAC) for production scenarios
284
+ api_key = os.environ.get("AZURE_SPEECH_API_KEY")
285
+ if api_key:
286
+ credential = AzureKeyCredential(api_key)
287
+ else:
288
+ from azure.identity import DefaultAzureCredential
289
+
290
+ credential = DefaultAzureCredential()
267
291
 
268
292
  # Create the transcription client
269
- client = TranscriptionClient(endpoint=endpoint, credential=AzureKeyCredential(api_key))
293
+ client = TranscriptionClient(endpoint=endpoint, credential=credential)
270
294
 
271
295
  # Path to your audio file
272
- import pathlib
273
-
274
296
  audio_file_path = pathlib.Path(__file__).parent / "assets" / "audio.wav"
275
297
 
276
298
  # Open and read the audio file
277
299
  with open(audio_file_path, "rb") as audio_file:
278
- # Create enhanced mode properties
279
- # Enable enhanced mode for advanced processing capabilities
280
- enhanced_mode = EnhancedModeProperties(
281
- task="translation", # Specify the task type (e.g., "translation", "summarization")
282
- target_language="es-ES", # Target language for translation
283
- prompt=[
284
- "Translate the following audio to Spanish",
285
- "Focus on technical terminology",
286
- ], # Optional prompts to guide the enhanced mode
287
- )
300
+ # Enhanced mode is automatically enabled when task is specified
301
+ enhanced_mode = EnhancedModeProperties(task="transcribe")
288
302
 
289
303
  # Create transcription options with enhanced mode
290
- options = TranscriptionOptions(locales=["en-US"], enhanced_mode=enhanced_mode)
304
+ options = TranscriptionOptions(enhanced_mode=enhanced_mode)
291
305
 
292
306
  # Create the request content
293
307
  request_content = TranscriptionContent(definition=options, audio=audio_file)
@@ -296,14 +310,7 @@ with open(audio_file_path, "rb") as audio_file:
296
310
  result = client.transcribe(request_content)
297
311
 
298
312
  # Print the transcription result
299
- print("Transcription with enhanced mode:")
300
- print(f"{result.combined_phrases[0].text}")
301
-
302
- # Print individual phrases if available
303
- if result.phrases:
304
- print("\nDetailed phrases:")
305
- for phrase in result.phrases:
306
- print(f" [{phrase.offset_milliseconds}ms]: {phrase.text}")
313
+ print(result.combined_phrases[0].text)
307
314
  ```
308
315
 
309
316
  <!-- END SNIPPET -->
@@ -321,10 +328,18 @@ from azure.ai.transcription.models import TranscriptionContent, TranscriptionOpt
321
328
 
322
329
  # Get configuration from environment variables
323
330
  endpoint = os.environ["AZURE_SPEECH_ENDPOINT"]
324
- api_key = os.environ["AZURE_SPEECH_API_KEY"]
331
+
332
+ # We recommend using role-based access control (RBAC) for production scenarios
333
+ api_key = os.environ.get("AZURE_SPEECH_API_KEY")
334
+ if api_key:
335
+ credential = AzureKeyCredential(api_key)
336
+ else:
337
+ from azure.identity.aio import DefaultAzureCredential
338
+
339
+ credential = DefaultAzureCredential()
325
340
 
326
341
  # Create the transcription client
327
- async with TranscriptionClient(endpoint=endpoint, credential=AzureKeyCredential(api_key)) as client:
342
+ async with TranscriptionClient(endpoint=endpoint, credential=credential) as client:
328
343
  # Path to your audio file
329
344
  import pathlib
330
345
 
@@ -464,6 +479,16 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_con
464
479
 
465
480
  # Release History
466
481
 
482
+ ## 1.0.0b3 (2026-02-04)
483
+
484
+ ### Features Added
485
+
486
+ - Enhanced Mode now automatically sets `enabled=True` when `task`, `target_language`, or `prompt` are specified
487
+
488
+ ### Bugs Fixed
489
+
490
+ - Fixed Enhanced Mode not being activated when using `EnhancedModeProperties` without explicitly setting `enabled=True`
491
+
467
492
  ## 1.0.0b2 (2025-12-19)
468
493
 
469
494
  ### Bugs Fixed
@@ -149,10 +149,18 @@ from azure.ai.transcription.models import TranscriptionContent, TranscriptionOpt
149
149
 
150
150
  # Get configuration from environment variables
151
151
  endpoint = os.environ["AZURE_SPEECH_ENDPOINT"]
152
- api_key = os.environ["AZURE_SPEECH_API_KEY"]
152
+
153
+ # We recommend using role-based access control (RBAC) for production scenarios
154
+ api_key = os.environ.get("AZURE_SPEECH_API_KEY")
155
+ if api_key:
156
+ credential = AzureKeyCredential(api_key)
157
+ else:
158
+ from azure.identity import DefaultAzureCredential
159
+
160
+ credential = DefaultAzureCredential()
153
161
 
154
162
  # Create the transcription client
155
- client = TranscriptionClient(endpoint=endpoint, credential=AzureKeyCredential(api_key))
163
+ client = TranscriptionClient(endpoint=endpoint, credential=credential)
156
164
 
157
165
  # Path to your audio file
158
166
  import pathlib
@@ -197,10 +205,18 @@ from azure.ai.transcription.models import TranscriptionOptions
197
205
 
198
206
  # Get configuration from environment variables
199
207
  endpoint = os.environ["AZURE_SPEECH_ENDPOINT"]
200
- api_key = os.environ["AZURE_SPEECH_API_KEY"]
208
+
209
+ # We recommend using role-based access control (RBAC) for production scenarios
210
+ api_key = os.environ.get("AZURE_SPEECH_API_KEY")
211
+ if api_key:
212
+ credential = AzureKeyCredential(api_key)
213
+ else:
214
+ from azure.identity import DefaultAzureCredential
215
+
216
+ credential = DefaultAzureCredential()
201
217
 
202
218
  # Create the transcription client
203
- client = TranscriptionClient(endpoint=endpoint, credential=AzureKeyCredential(api_key))
219
+ client = TranscriptionClient(endpoint=endpoint, credential=credential)
204
220
 
205
221
  # URL to your audio file (must be publicly accessible)
206
222
  audio_url = "https://example.com/path/to/audio.wav"
@@ -238,31 +254,29 @@ from azure.ai.transcription.models import (
238
254
 
239
255
  # Get configuration from environment variables
240
256
  endpoint = os.environ["AZURE_SPEECH_ENDPOINT"]
241
- api_key = os.environ["AZURE_SPEECH_API_KEY"]
257
+
258
+ # We recommend using role-based access control (RBAC) for production scenarios
259
+ api_key = os.environ.get("AZURE_SPEECH_API_KEY")
260
+ if api_key:
261
+ credential = AzureKeyCredential(api_key)
262
+ else:
263
+ from azure.identity import DefaultAzureCredential
264
+
265
+ credential = DefaultAzureCredential()
242
266
 
243
267
  # Create the transcription client
244
- client = TranscriptionClient(endpoint=endpoint, credential=AzureKeyCredential(api_key))
268
+ client = TranscriptionClient(endpoint=endpoint, credential=credential)
245
269
 
246
270
  # Path to your audio file
247
- import pathlib
248
-
249
271
  audio_file_path = pathlib.Path(__file__).parent / "assets" / "audio.wav"
250
272
 
251
273
  # Open and read the audio file
252
274
  with open(audio_file_path, "rb") as audio_file:
253
- # Create enhanced mode properties
254
- # Enable enhanced mode for advanced processing capabilities
255
- enhanced_mode = EnhancedModeProperties(
256
- task="translation", # Specify the task type (e.g., "translation", "summarization")
257
- target_language="es-ES", # Target language for translation
258
- prompt=[
259
- "Translate the following audio to Spanish",
260
- "Focus on technical terminology",
261
- ], # Optional prompts to guide the enhanced mode
262
- )
275
+ # Enhanced mode is automatically enabled when task is specified
276
+ enhanced_mode = EnhancedModeProperties(task="transcribe")
263
277
 
264
278
  # Create transcription options with enhanced mode
265
- options = TranscriptionOptions(locales=["en-US"], enhanced_mode=enhanced_mode)
279
+ options = TranscriptionOptions(enhanced_mode=enhanced_mode)
266
280
 
267
281
  # Create the request content
268
282
  request_content = TranscriptionContent(definition=options, audio=audio_file)
@@ -271,14 +285,7 @@ with open(audio_file_path, "rb") as audio_file:
271
285
  result = client.transcribe(request_content)
272
286
 
273
287
  # Print the transcription result
274
- print("Transcription with enhanced mode:")
275
- print(f"{result.combined_phrases[0].text}")
276
-
277
- # Print individual phrases if available
278
- if result.phrases:
279
- print("\nDetailed phrases:")
280
- for phrase in result.phrases:
281
- print(f" [{phrase.offset_milliseconds}ms]: {phrase.text}")
288
+ print(result.combined_phrases[0].text)
282
289
  ```
283
290
 
284
291
  <!-- END SNIPPET -->
@@ -296,10 +303,18 @@ from azure.ai.transcription.models import TranscriptionContent, TranscriptionOpt
296
303
 
297
304
  # Get configuration from environment variables
298
305
  endpoint = os.environ["AZURE_SPEECH_ENDPOINT"]
299
- api_key = os.environ["AZURE_SPEECH_API_KEY"]
306
+
307
+ # We recommend using role-based access control (RBAC) for production scenarios
308
+ api_key = os.environ.get("AZURE_SPEECH_API_KEY")
309
+ if api_key:
310
+ credential = AzureKeyCredential(api_key)
311
+ else:
312
+ from azure.identity.aio import DefaultAzureCredential
313
+
314
+ credential = DefaultAzureCredential()
300
315
 
301
316
  # Create the transcription client
302
- async with TranscriptionClient(endpoint=endpoint, credential=AzureKeyCredential(api_key)) as client:
317
+ async with TranscriptionClient(endpoint=endpoint, credential=credential) as client:
303
318
  # Path to your audio file
304
319
  import pathlib
305
320
 
@@ -27,7 +27,7 @@ class TranscriptionClient(_TranscriptionClientOperationsMixin):
27
27
  """TranscriptionClient.
28
28
 
29
29
  :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example:
30
- `https://westus.api.cognitive.microsoft.com <https://westus.api.cognitive.microsoft.com>`_.
30
+ `https://westus.api.cognitive.microsoft.com <https://westus.api.cognitive.microsoft.com>`_).
31
31
  Required.
32
32
  :type endpoint: str
33
33
  :param credential: Credential used to authenticate requests to the service. Is either a key
@@ -24,7 +24,7 @@ class TranscriptionClientConfiguration: # pylint: disable=too-many-instance-att
24
24
  attributes.
25
25
 
26
26
  :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example:
27
- `https://westus.api.cognitive.microsoft.com <https://westus.api.cognitive.microsoft.com>`_.
27
+ `https://westus.api.cognitive.microsoft.com <https://westus.api.cognitive.microsoft.com>`_).
28
28
  Required.
29
29
  :type endpoint: str
30
30
  :param credential: Credential used to authenticate requests to the service. Is either a key
@@ -110,12 +110,11 @@ class _TranscriptionClientOperationsMixin(
110
110
  _body = body.as_dict() if isinstance(body, _Model) else body
111
111
  _file_fields: list[str] = ["audio"]
112
112
  _data_fields: list[str] = ["definition"]
113
- _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields)
113
+ _files = prepare_multipart_form_data(_body, _file_fields, _data_fields)
114
114
 
115
115
  _request = build_transcription_transcribe_request(
116
116
  api_version=self._config.api_version,
117
117
  files=_files,
118
- data=_data,
119
118
  headers=_headers,
120
119
  params=_params,
121
120
  )
@@ -12,7 +12,14 @@ from collections.abc import MutableMapping
12
12
  from typing import Any, Optional
13
13
  import json
14
14
  from azure.core.tracing.decorator import distributed_trace
15
- from azure.core.exceptions import map_error, HttpResponseError, ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError, ResourceNotModifiedError
15
+ from azure.core.exceptions import (
16
+ map_error,
17
+ HttpResponseError,
18
+ ClientAuthenticationError,
19
+ ResourceNotFoundError,
20
+ ResourceExistsError,
21
+ ResourceNotModifiedError,
22
+ )
16
23
 
17
24
  from .. import models as _models
18
25
  from .._utils.model_base import _deserialize, SdkJSONEncoder
@@ -93,7 +100,9 @@ class _TranscriptionClientOperationsMixin(_TranscriptionClientOperationsMixinGen
93
100
  }
94
101
  _request.url = self._client.format_url(_request.url, **path_format_arguments)
95
102
 
96
- pipeline_response = self._client._pipeline.run(_request, stream=False, **kwargs) # pylint: disable=protected-access
103
+ pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
104
+ _request, stream=False, **kwargs
105
+ )
97
106
  response = pipeline_response.http_response
98
107
 
99
108
  if response.status_code not in [200]:
@@ -37,6 +37,7 @@ __all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"]
37
37
 
38
38
  TZ_UTC = timezone.utc
39
39
  _T = typing.TypeVar("_T")
40
+ _NONE_TYPE = type(None)
40
41
 
41
42
 
42
43
  def _timedelta_as_isostr(td: timedelta) -> str:
@@ -171,6 +172,21 @@ _VALID_RFC7231 = re.compile(
171
172
  r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT"
172
173
  )
173
174
 
175
+ _ARRAY_ENCODE_MAPPING = {
176
+ "pipeDelimited": "|",
177
+ "spaceDelimited": " ",
178
+ "commaDelimited": ",",
179
+ "newlineDelimited": "\n",
180
+ }
181
+
182
+
183
+ def _deserialize_array_encoded(delimit: str, attr):
184
+ if isinstance(attr, str):
185
+ if attr == "":
186
+ return []
187
+ return attr.split(delimit)
188
+ return attr
189
+
174
190
 
175
191
  def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime:
176
192
  """Deserialize ISO-8601 formatted string into Datetime object.
@@ -202,7 +218,7 @@ def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime:
202
218
  test_utc = date_obj.utctimetuple()
203
219
  if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
204
220
  raise OverflowError("Hit max or min date")
205
- return date_obj
221
+ return date_obj # type: ignore[no-any-return]
206
222
 
207
223
 
208
224
  def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime:
@@ -256,7 +272,7 @@ def _deserialize_time(attr: typing.Union[str, time]) -> time:
256
272
  """
257
273
  if isinstance(attr, time):
258
274
  return attr
259
- return isodate.parse_time(attr)
275
+ return isodate.parse_time(attr) # type: ignore[no-any-return]
260
276
 
261
277
 
262
278
  def _deserialize_bytes(attr):
@@ -315,6 +331,8 @@ _DESERIALIZE_MAPPING_WITHFORMAT = {
315
331
  def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None):
316
332
  if annotation is int and rf and rf._format == "str":
317
333
  return _deserialize_int_as_str
334
+ if annotation is str and rf and rf._format in _ARRAY_ENCODE_MAPPING:
335
+ return functools.partial(_deserialize_array_encoded, _ARRAY_ENCODE_MAPPING[rf._format])
318
336
  if rf and rf._format:
319
337
  return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format)
320
338
  return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore
@@ -353,9 +371,39 @@ class _MyMutableMapping(MutableMapping[str, typing.Any]):
353
371
  return key in self._data
354
372
 
355
373
  def __getitem__(self, key: str) -> typing.Any:
374
+ # If this key has been deserialized (for mutable types), we need to handle serialization
375
+ if hasattr(self, "_attr_to_rest_field"):
376
+ cache_attr = f"_deserialized_{key}"
377
+ if hasattr(self, cache_attr):
378
+ rf = _get_rest_field(getattr(self, "_attr_to_rest_field"), key)
379
+ if rf:
380
+ value = self._data.get(key)
381
+ if isinstance(value, (dict, list, set)):
382
+ # For mutable types, serialize and return
383
+ # But also update _data with serialized form and clear flag
384
+ # so mutations via this returned value affect _data
385
+ serialized = _serialize(value, rf._format)
386
+ # If serialized form is same type (no transformation needed),
387
+ # return _data directly so mutations work
388
+ if isinstance(serialized, type(value)) and serialized == value:
389
+ return self._data.get(key)
390
+ # Otherwise return serialized copy and clear flag
391
+ try:
392
+ object.__delattr__(self, cache_attr)
393
+ except AttributeError:
394
+ pass
395
+ # Store serialized form back
396
+ self._data[key] = serialized
397
+ return serialized
356
398
  return self._data.__getitem__(key)
357
399
 
358
400
  def __setitem__(self, key: str, value: typing.Any) -> None:
401
+ # Clear any cached deserialized value when setting through dictionary access
402
+ cache_attr = f"_deserialized_{key}"
403
+ try:
404
+ object.__delattr__(self, cache_attr)
405
+ except AttributeError:
406
+ pass
359
407
  self._data.__setitem__(key, value)
360
408
 
361
409
  def __delitem__(self, key: str) -> None:
@@ -483,6 +531,8 @@ def _is_model(obj: typing.Any) -> bool:
483
531
 
484
532
  def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements
485
533
  if isinstance(o, list):
534
+ if format in _ARRAY_ENCODE_MAPPING and all(isinstance(x, str) for x in o):
535
+ return _ARRAY_ENCODE_MAPPING[format].join(o)
486
536
  return [_serialize(x, format) for x in o]
487
537
  if isinstance(o, dict):
488
538
  return {k: _serialize(v, format) for k, v in o.items()}
@@ -758,6 +808,14 @@ def _deserialize_multiple_sequence(
758
808
  return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers))
759
809
 
760
810
 
811
+ def _is_array_encoded_deserializer(deserializer: functools.partial) -> bool:
812
+ return (
813
+ isinstance(deserializer, functools.partial)
814
+ and isinstance(deserializer.args[0], functools.partial)
815
+ and deserializer.args[0].func == _deserialize_array_encoded # pylint: disable=comparison-with-callable
816
+ )
817
+
818
+
761
819
  def _deserialize_sequence(
762
820
  deserializer: typing.Optional[typing.Callable],
763
821
  module: typing.Optional[str],
@@ -767,6 +825,19 @@ def _deserialize_sequence(
767
825
  return obj
768
826
  if isinstance(obj, ET.Element):
769
827
  obj = list(obj)
828
+
829
+ # encoded string may be deserialized to sequence
830
+ if isinstance(obj, str) and isinstance(deserializer, functools.partial):
831
+ # for list[str]
832
+ if _is_array_encoded_deserializer(deserializer):
833
+ return deserializer(obj)
834
+
835
+ # for list[Union[...]]
836
+ if isinstance(deserializer.args[0], list):
837
+ for sub_deserializer in deserializer.args[0]:
838
+ if _is_array_encoded_deserializer(sub_deserializer):
839
+ return sub_deserializer(obj)
840
+
770
841
  return type(obj)(_deserialize(deserializer, entry, module) for entry in obj)
771
842
 
772
843
 
@@ -817,16 +888,16 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-retur
817
888
 
818
889
  # is it optional?
819
890
  try:
820
- if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore
891
+ if any(a is _NONE_TYPE for a in annotation.__args__): # pyright: ignore
821
892
  if len(annotation.__args__) <= 2: # pyright: ignore
822
893
  if_obj_deserializer = _get_deserialize_callable_from_annotation(
823
- next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore
894
+ next(a for a in annotation.__args__ if a is not _NONE_TYPE), module, rf # pyright: ignore
824
895
  )
825
896
 
826
897
  return functools.partial(_deserialize_with_optional, if_obj_deserializer)
827
898
  # the type is Optional[Union[...]], we need to remove the None type from the Union
828
899
  annotation_copy = copy.copy(annotation)
829
- annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore
900
+ annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a is not _NONE_TYPE] # pyright: ignore
830
901
  return _get_deserialize_callable_from_annotation(annotation_copy, module, rf)
831
902
  except AttributeError:
832
903
  pass
@@ -952,7 +1023,7 @@ def _failsafe_deserialize(
952
1023
  ) -> typing.Any:
953
1024
  try:
954
1025
  return _deserialize(deserializer, response.json(), module, rf, format)
955
- except DeserializationError:
1026
+ except Exception: # pylint: disable=broad-except
956
1027
  _LOGGER.warning(
957
1028
  "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
958
1029
  )
@@ -965,7 +1036,7 @@ def _failsafe_deserialize_xml(
965
1036
  ) -> typing.Any:
966
1037
  try:
967
1038
  return _deserialize_xml(deserializer, response.text())
968
- except DeserializationError:
1039
+ except Exception: # pylint: disable=broad-except
969
1040
  _LOGGER.warning(
970
1041
  "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
971
1042
  )
@@ -998,7 +1069,11 @@ class _RestField:
998
1069
 
999
1070
  @property
1000
1071
  def _class_type(self) -> typing.Any:
1001
- return getattr(self._type, "args", [None])[0]
1072
+ result = getattr(self._type, "args", [None])[0]
1073
+ # type may be wrapped by nested functools.partial so we need to check for that
1074
+ if isinstance(result, functools.partial):
1075
+ return getattr(result, "args", [None])[0]
1076
+ return result
1002
1077
 
1003
1078
  @property
1004
1079
  def _rest_name(self) -> str:
@@ -1009,14 +1084,37 @@ class _RestField:
1009
1084
  def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin
1010
1085
  # by this point, type and rest_name will have a value bc we default
1011
1086
  # them in __new__ of the Model class
1012
- item = obj.get(self._rest_name)
1087
+ # Use _data.get() directly to avoid triggering __getitem__ which clears the cache
1088
+ item = obj._data.get(self._rest_name)
1013
1089
  if item is None:
1014
1090
  return item
1015
1091
  if self._is_model:
1016
1092
  return item
1017
- return _deserialize(self._type, _serialize(item, self._format), rf=self)
1093
+
1094
+ # For mutable types, we want mutations to directly affect _data
1095
+ # Check if we've already deserialized this value
1096
+ cache_attr = f"_deserialized_{self._rest_name}"
1097
+ if hasattr(obj, cache_attr):
1098
+ # Return the value from _data directly (it's been deserialized in place)
1099
+ return obj._data.get(self._rest_name)
1100
+
1101
+ deserialized = _deserialize(self._type, _serialize(item, self._format), rf=self)
1102
+
1103
+ # For mutable types, store the deserialized value back in _data
1104
+ # so mutations directly affect _data
1105
+ if isinstance(deserialized, (dict, list, set)):
1106
+ obj._data[self._rest_name] = deserialized
1107
+ object.__setattr__(obj, cache_attr, True) # Mark as deserialized
1108
+ return deserialized
1109
+
1110
+ return deserialized
1018
1111
 
1019
1112
  def __set__(self, obj: Model, value) -> None:
1113
+ # Clear the cached deserialized object when setting a new value
1114
+ cache_attr = f"_deserialized_{self._rest_name}"
1115
+ if hasattr(obj, cache_attr):
1116
+ object.__delattr__(obj, cache_attr)
1117
+
1020
1118
  if value is None:
1021
1119
  # we want to wipe out entries if users set attr to None
1022
1120
  try:
@@ -1184,7 +1282,7 @@ def _get_wrapped_element(
1184
1282
  _get_element(v, exclude_readonly, meta, wrapped_element)
1185
1283
  else:
1186
1284
  wrapped_element.text = _get_primitive_type_value(v)
1187
- return wrapped_element
1285
+ return wrapped_element # type: ignore[no-any-return]
1188
1286
 
1189
1287
 
1190
1288
  def _get_primitive_type_value(v) -> str:
@@ -1197,7 +1295,9 @@ def _get_primitive_type_value(v) -> str:
1197
1295
  return str(v)
1198
1296
 
1199
1297
 
1200
- def _create_xml_element(tag, prefix=None, ns=None):
1298
+ def _create_xml_element(
1299
+ tag: typing.Any, prefix: typing.Optional[str] = None, ns: typing.Optional[str] = None
1300
+ ) -> ET.Element:
1201
1301
  if prefix and ns:
1202
1302
  ET.register_namespace(prefix, ns)
1203
1303
  if ns:
@@ -821,13 +821,20 @@ class Serializer: # pylint: disable=too-many-public-methods
821
821
  :param str data_type: Type of object in the iterable.
822
822
  :rtype: str, int, float, bool
823
823
  :return: serialized object
824
+ :raises TypeError: raise if data_type is not one of str, int, float, bool.
824
825
  """
825
826
  custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
826
827
  if custom_serializer:
827
828
  return custom_serializer(data)
828
829
  if data_type == "str":
829
830
  return cls.serialize_unicode(data)
830
- return eval(data_type)(data) # nosec # pylint: disable=eval-used
831
+ if data_type == "int":
832
+ return int(data)
833
+ if data_type == "float":
834
+ return float(data)
835
+ if data_type == "bool":
836
+ return bool(data)
837
+ raise TypeError("Unknown basic data type: {}".format(data_type))
831
838
 
832
839
  @classmethod
833
840
  def serialize_unicode(cls, data):
@@ -1757,7 +1764,7 @@ class Deserializer:
1757
1764
  :param str data_type: deserialization data type.
1758
1765
  :return: Deserialized basic type.
1759
1766
  :rtype: str, int, float or bool
1760
- :raises TypeError: if string format is not valid.
1767
+ :raises TypeError: if string format is not valid or data_type is not one of str, int, float, bool.
1761
1768
  """
1762
1769
  # If we're here, data is supposed to be a basic type.
1763
1770
  # If it's still an XML node, take the text
@@ -1783,7 +1790,11 @@ class Deserializer:
1783
1790
 
1784
1791
  if data_type == "str":
1785
1792
  return self.deserialize_unicode(attr)
1786
- return eval(data_type)(attr) # nosec # pylint: disable=eval-used
1793
+ if data_type == "int":
1794
+ return int(attr)
1795
+ if data_type == "float":
1796
+ return float(attr)
1797
+ raise TypeError("Unknown basic data type: {}".format(data_type))
1787
1798
 
1788
1799
  @staticmethod
1789
1800
  def deserialize_unicode(data):