murf 1.2.0__py3-none-any.whl → 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -54,6 +54,7 @@ class TextToSpeechClient:
             The text that is to be synthesised. e.g. 'Hello there [pause 1s] friend'
 
         voice_id : str
+            Use the GET /v1/speech/voices api to find supported voiceIds.
 
         audio_duration : typing.Optional[float]
             This parameter allows specifying the duration (in seconds) for the generated audio. If the value is 0, this parameter will be ignored. Only available for Gen2 model.
@@ -72,7 +73,7 @@ class TextToSpeechClient:
 
         multi_native_locale : typing.Optional[str]
             Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
-            Valid values: "en-US", "en-UK", "es-ES", etc. Use the GET /v1/speed/voices endpoint to retrieve the list of available voices and languages.
+            Valid values: "en-US", "en-UK", "es-ES", etc. Use the GET /v1/speech/voices endpoint to retrieve the list of available voices and languages.
 
         pitch : typing.Optional[int]
             Pitch of the voiceover
@@ -209,6 +210,157 @@ class TextToSpeechClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def stream(
+        self,
+        *,
+        text: str,
+        voice_id: str,
+        channel_type: typing.Optional[str] = OMIT,
+        format: typing.Optional[str] = OMIT,
+        multi_native_locale: typing.Optional[str] = OMIT,
+        pitch: typing.Optional[int] = OMIT,
+        rate: typing.Optional[int] = OMIT,
+        sample_rate: typing.Optional[float] = OMIT,
+        style: typing.Optional[str] = OMIT,
+        variation: typing.Optional[int] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.Iterator[bytes]:
+        """
+        Returns a streaming output of generated audio
+
+        Parameters
+        ----------
+        text : str
+            The text that is to be synthesised. e.g. 'Hello there [pause 1s] friend'
+
+        voice_id : str
+            Use the GET /v1/speech/voices api to find supported voiceIds.
+
+        channel_type : typing.Optional[str]
+            Valid values: STEREO, MONO
+
+        format : typing.Optional[str]
+            Format of the generated audio file. Valid values: MP3, WAV
+
+        multi_native_locale : typing.Optional[str]
+            Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
+            Valid values: "en-US", "en-UK", "es-ES", etc. Use the GET /v1/speech/voices endpoint to retrieve the list of available voices and languages.
+
+        pitch : typing.Optional[int]
+            Pitch of the voiceover
+
+        rate : typing.Optional[int]
+            Speed of the voiceover
+
+        sample_rate : typing.Optional[float]
+            Valid values are 8000, 24000, 44100, 48000
+
+        style : typing.Optional[str]
+            The voice style to be used for voiceover generation.
+
+        variation : typing.Optional[int]
+            Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+
+        Yields
+        ------
+        typing.Iterator[bytes]
+            Ok
+
+        Examples
+        --------
+        from murf import Murf
+
+        client = Murf(
+            api_key="YOUR_API_KEY",
+        )
+        client.text_to_speech.stream()
+        """
+        with self._client_wrapper.httpx_client.stream(
+            "v1/speech/stream",
+            method="POST",
+            json={
+                "channelType": channel_type,
+                "format": format,
+                "multiNativeLocale": multi_native_locale,
+                "pitch": pitch,
+                "rate": rate,
+                "sampleRate": sample_rate,
+                "style": style,
+                "text": text,
+                "variation": variation,
+                "voiceId": voice_id,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+            try:
+                if 200 <= _response.status_code < 300:
+                    _chunk_size = request_options.get("chunk_size", None) if request_options is not None else None
+                    for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
+                        yield _chunk
+                    return
+                _response.read()
+                if _response.status_code == 400:
+                    raise BadRequestError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                if _response.status_code == 402:
+                    raise PaymentRequiredError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                if _response.status_code == 403:
+                    raise ForbiddenError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                if _response.status_code == 500:
+                    raise InternalServerError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                if _response.status_code == 503:
+                    raise ServiceUnavailableError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                _response_json = _response.json()
+            except JSONDecodeError:
+                raise ApiError(status_code=_response.status_code, body=_response.text)
+            raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_voices(
         self, *, token: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
     ) -> typing.List[ApiVoice]:
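
This hunk adds a synchronous stream method that POSTs to v1/speech/stream and yields the audio as raw byte chunks. A minimal usage sketch based on the signature above; the voice id, format value, and output path are illustrative assumptions, not values taken from the diff:

    from murf import Murf

    client = Murf(api_key="YOUR_API_KEY")

    # Write the synthesized audio to disk chunk by chunk instead of
    # buffering the whole clip in memory.
    with open("hello.mp3", "wb") as f:
        for chunk in client.text_to_speech.stream(
            text="Hello there [pause 1s] friend",
            voice_id="en-US-natalie",  # assumed id; list real ones via GET /v1/speech/voices
            format="MP3",
        ):
            f.write(chunk)

Because the method yields chunks as they arrive, playback or saving can begin before synthesis of the full text finishes.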
@@ -331,6 +483,7 @@ class AsyncTextToSpeechClient:
             The text that is to be synthesised. e.g. 'Hello there [pause 1s] friend'
 
         voice_id : str
+            Use the GET /v1/speech/voices api to find supported voiceIds.
 
         audio_duration : typing.Optional[float]
             This parameter allows specifying the duration (in seconds) for the generated audio. If the value is 0, this parameter will be ignored. Only available for Gen2 model.
@@ -349,7 +502,7 @@ class AsyncTextToSpeechClient:
 
         multi_native_locale : typing.Optional[str]
             Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
-            Valid values: "en-US", "en-UK", "es-ES", etc. Use the GET /v1/speed/voices endpoint to retrieve the list of available voices and languages.
+            Valid values: "en-US", "en-UK", "es-ES", etc. Use the GET /v1/speech/voices endpoint to retrieve the list of available voices and languages.
 
         pitch : typing.Optional[int]
             Pitch of the voiceover
@@ -494,6 +647,165 @@ class AsyncTextToSpeechClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def stream(
+        self,
+        *,
+        text: str,
+        voice_id: str,
+        channel_type: typing.Optional[str] = OMIT,
+        format: typing.Optional[str] = OMIT,
+        multi_native_locale: typing.Optional[str] = OMIT,
+        pitch: typing.Optional[int] = OMIT,
+        rate: typing.Optional[int] = OMIT,
+        sample_rate: typing.Optional[float] = OMIT,
+        style: typing.Optional[str] = OMIT,
+        variation: typing.Optional[int] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.AsyncIterator[bytes]:
+        """
+        Returns a streaming output of generated audio
+
+        Parameters
+        ----------
+        text : str
+            The text that is to be synthesised. e.g. 'Hello there [pause 1s] friend'
+
+        voice_id : str
+            Use the GET /v1/speech/voices api to find supported voiceIds.
+
+        channel_type : typing.Optional[str]
+            Valid values: STEREO, MONO
+
+        format : typing.Optional[str]
+            Format of the generated audio file. Valid values: MP3, WAV
+
+        multi_native_locale : typing.Optional[str]
+            Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
+            Valid values: "en-US", "en-UK", "es-ES", etc. Use the GET /v1/speech/voices endpoint to retrieve the list of available voices and languages.
+
+        pitch : typing.Optional[int]
+            Pitch of the voiceover
+
+        rate : typing.Optional[int]
+            Speed of the voiceover
+
+        sample_rate : typing.Optional[float]
+            Valid values are 8000, 24000, 44100, 48000
+
+        style : typing.Optional[str]
+            The voice style to be used for voiceover generation.
+
+        variation : typing.Optional[int]
+            Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+
+        Yields
+        ------
+        typing.AsyncIterator[bytes]
+            Ok
+
+        Examples
+        --------
+        import asyncio
+
+        from murf import AsyncMurf
+
+        client = AsyncMurf(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.text_to_speech.stream()
+
+
+        asyncio.run(main())
+        """
+        async with self._client_wrapper.httpx_client.stream(
+            "v1/speech/stream",
+            method="POST",
+            json={
+                "channelType": channel_type,
+                "format": format,
+                "multiNativeLocale": multi_native_locale,
+                "pitch": pitch,
+                "rate": rate,
+                "sampleRate": sample_rate,
+                "style": style,
+                "text": text,
+                "variation": variation,
+                "voiceId": voice_id,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+            try:
+                if 200 <= _response.status_code < 300:
+                    _chunk_size = request_options.get("chunk_size", None) if request_options is not None else None
+                    async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
+                        yield _chunk
+                    return
+                await _response.aread()
+                if _response.status_code == 400:
+                    raise BadRequestError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                if _response.status_code == 402:
+                    raise PaymentRequiredError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                if _response.status_code == 403:
+                    raise ForbiddenError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                if _response.status_code == 500:
+                    raise InternalServerError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                if _response.status_code == 503:
+                    raise ServiceUnavailableError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
+                _response_json = _response.json()
+            except JSONDecodeError:
+                raise ApiError(status_code=_response.status_code, body=_response.text)
+            raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_voices(
         self, *, token: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
     ) -> typing.List[ApiVoice]:
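
The async variant mirrors the synchronous one. Note that because it is declared as an async generator, it is consumed with `async for` rather than the bare `await` shown in the generated docstring example. A hedged sketch under the same assumptions as the synchronous example:

    import asyncio

    from murf import AsyncMurf

    client = AsyncMurf(api_key="YOUR_API_KEY")

    async def main() -> None:
        audio = bytearray()
        # Accumulate streamed chunks; the chunk size can be tuned via
        # request_options={"chunk_size": ...} per the docstring above.
        async for chunk in client.text_to_speech.stream(
            text="Hello there [pause 1s] friend",
            voice_id="en-US-natalie",  # assumed id
        ):
            audio.extend(chunk)

    asyncio.run(main())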
murf/types/__init__.py CHANGED
@@ -8,6 +8,7 @@ from .api_project_response_dubbing_type import ApiProjectResponseDubbingType
 from .api_voice import ApiVoice
 from .api_voice_gender import ApiVoiceGender
 from .auth_token_response import AuthTokenResponse
+from .character_count import CharacterCount
 from .dub_api_detail_response import DubApiDetailResponse
 from .dub_job_status_response import DubJobStatusResponse
 from .form_data_content_disposition import FormDataContentDisposition
@@ -15,11 +16,15 @@ from .generate_speech_response import GenerateSpeechResponse
 from .group_api_project_response import GroupApiProjectResponse
 from .locale_response import LocaleResponse
 from .locale_response_supports_item import LocaleResponseSupportsItem
+from .metadata import Metadata
+from .murf_api_translation_response import MurfApiTranslationResponse
 from .pronunciation_detail import PronunciationDetail
 from .pronunciation_detail_type import PronunciationDetailType
 from .source_locale_response import SourceLocaleResponse
+from .speech_to_speech_response import SpeechToSpeechResponse
 from .style_details import StyleDetails
-from .word_duration import WordDuration
+from .translation import Translation
+from .word_duration_response import WordDurationResponse
 
 __all__ = [
     "ApiJobResponse",
@@ -30,6 +35,7 @@ __all__ = [
     "ApiVoice",
     "ApiVoiceGender",
     "AuthTokenResponse",
+    "CharacterCount",
     "DubApiDetailResponse",
     "DubJobStatusResponse",
     "FormDataContentDisposition",
@@ -37,9 +43,13 @@ __all__ = [
     "GroupApiProjectResponse",
     "LocaleResponse",
     "LocaleResponseSupportsItem",
+    "Metadata",
+    "MurfApiTranslationResponse",
     "PronunciationDetail",
     "PronunciationDetailType",
     "SourceLocaleResponse",
+    "SpeechToSpeechResponse",
     "StyleDetails",
-    "WordDuration",
+    "Translation",
+    "WordDurationResponse",
 ]
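
These re-export changes also mean `WordDuration` disappears from `murf.types` in favour of `WordDurationResponse`, a breaking import for downstream code. A quick sketch of the 1.2.2 surface:

    # Types newly importable from murf.types in 1.2.2 (per the diff above);
    # `from murf.types import WordDuration` now raises ImportError.
    from murf.types import (
        CharacterCount,
        Metadata,
        MurfApiTranslationResponse,
        SpeechToSpeechResponse,
        Translation,
        WordDurationResponse,
    )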
murf/types/character_count.py ADDED
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class CharacterCount(UniversalBaseModel):
+    total_source_text_length: typing.Optional[int] = None
+    total_translated_text_length: typing.Optional[int] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
murf/types/generate_speech_response.py CHANGED
@@ -2,37 +2,33 @@
 
 from ..core.pydantic_utilities import UniversalBaseModel
 import typing_extensions
-import typing
 from ..core.serialization import FieldMetadata
 import pydantic
-from .word_duration import WordDuration
+import typing
+from .word_duration_response import WordDurationResponse
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 
 
 class GenerateSpeechResponse(UniversalBaseModel):
-    audio_file: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="audioFile")] = None
-    audio_length_in_seconds: typing_extensions.Annotated[
-        typing.Optional[float], FieldMetadata(alias="audioLengthInSeconds")
-    ] = None
-    consumed_character_count: typing_extensions.Annotated[
-        typing.Optional[int], FieldMetadata(alias="consumedCharacterCount")
-    ] = pydantic.Field(default=None)
+    audio_file: typing_extensions.Annotated[str, FieldMetadata(alias="audioFile")]
+    audio_length_in_seconds: typing_extensions.Annotated[float, FieldMetadata(alias="audioLengthInSeconds")]
+    consumed_character_count: typing_extensions.Annotated[int, FieldMetadata(alias="consumedCharacterCount")] = (
+        pydantic.Field()
+    )
     """
     Number of characters consumed so far in the current billing cycle.
     """
 
     encoded_audio: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="encodedAudio")] = None
-    remaining_character_count: typing_extensions.Annotated[
-        typing.Optional[int], FieldMetadata(alias="remainingCharacterCount")
-    ] = pydantic.Field(default=None)
+    remaining_character_count: typing_extensions.Annotated[int, FieldMetadata(alias="remainingCharacterCount")] = (
+        pydantic.Field()
+    )
     """
     Remaining number of characters available for synthesis in the current billing cycle.
     """
 
     warning: typing.Optional[str] = None
-    word_durations: typing_extensions.Annotated[
-        typing.Optional[typing.List[WordDuration]], FieldMetadata(alias="wordDurations")
-    ] = None
+    word_durations: typing_extensions.Annotated[typing.List[WordDurationResponse], FieldMetadata(alias="wordDurations")]
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
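
Note that `audio_file`, `audio_length_in_seconds`, `consumed_character_count`, `remaining_character_count`, and `word_durations` all switch from optional to required here, so a payload that omits them now fails validation instead of producing None fields. A hedged sketch, assuming the standard pydantic `parse_obj` entry point; the payload is invented:

    import pydantic

    from murf.types import GenerateSpeechResponse

    try:
        # Missing the newly required aliased fields such as "audioFile".
        GenerateSpeechResponse.parse_obj({"warning": "quota low"})
    except pydantic.ValidationError as err:
        print(err)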
murf/types/metadata.py ADDED
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .character_count import CharacterCount
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class Metadata(UniversalBaseModel):
+    character_count: typing.Optional[CharacterCount] = None
+    credits_used: typing.Optional[float] = None
+    target_language: typing.Optional[str] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
murf/types/murf_api_translation_response.py ADDED
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .metadata import Metadata
+from .translation import Translation
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class MurfApiTranslationResponse(UniversalBaseModel):
+    metadata: typing.Optional[Metadata] = None
+    translations: typing.Optional[typing.List[Translation]] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
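
`MurfApiTranslationResponse` ties the new translation models together: a `Metadata` (with a nested `CharacterCount`) plus a list of `Translation` pairs. A hedged parsing sketch; the payload is invented, not a documented API response:

    from murf.types import MurfApiTranslationResponse

    payload = {
        "metadata": {
            "character_count": {
                "total_source_text_length": 12,
                "total_translated_text_length": 10,
            },
            "credits_used": 12.0,
            "target_language": "es-ES",
        },
        "translations": [
            {"source_text": "Hello friend", "translated_text": "Hola amigo"},
        ],
    }

    resp = MurfApiTranslationResponse.parse_obj(payload)
    print(resp.translations[0].translated_text)  # Hola amigo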
murf/types/speech_to_speech_response.py ADDED
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import pydantic
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class SpeechToSpeechResponse(UniversalBaseModel):
+    audio_file: str = pydantic.Field()
+    """
+    The URL or path of the generated audio file.
+    """
+
+    audio_length_in_seconds: float = pydantic.Field()
+    """
+    Length of the generated audio in seconds.
+    """
+
+    encoded_audio: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Base64 encoded string of the generated audio. Used when audio is returned directly in the response.
+    """
+
+    remaining_character_count: int = pydantic.Field()
+    """
+    Remaining number of characters available for synthesis in the current billing cycle.
+    """
+
+    transcription: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Transcript of the generated audio, if transcription was requested.
+    """
+
+    warning: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Any warning or informational message related to the audio generation process.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
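
Per its field docstrings, `SpeechToSpeechResponse` can carry the audio either as a reference (`audio_file`) or inline as base64 (`encoded_audio`). A hedged sketch of handling both shapes; the field values are invented:

    import base64

    from murf.types import SpeechToSpeechResponse

    resp = SpeechToSpeechResponse.parse_obj({
        "audio_file": "https://example.com/out.wav",
        "audio_length_in_seconds": 3.2,
        "remaining_character_count": 99_988,
        "encoded_audio": base64.b64encode(b"RIFF....WAVE").decode(),
    })

    if resp.encoded_audio is not None:
        audio_bytes = base64.b64decode(resp.encoded_audio)  # inline payload
    else:
        print("download from", resp.audio_file)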
murf/types/translation.py ADDED
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class Translation(UniversalBaseModel):
+    source_text: typing.Optional[str] = None
+    translated_text: typing.Optional[str] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
murf/types/word_duration_response.py RENAMED
@@ -4,19 +4,33 @@ from ..core.pydantic_utilities import UniversalBaseModel
 import typing_extensions
 import typing
 from ..core.serialization import FieldMetadata
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
 
 
-class WordDuration(UniversalBaseModel):
+class WordDurationResponse(UniversalBaseModel):
     end_ms: typing_extensions.Annotated[typing.Optional[int], FieldMetadata(alias="endMs")] = None
     pitch_scale_maximum: typing_extensions.Annotated[
         typing.Optional[float], FieldMetadata(alias="pitchScaleMaximum")
-    ] = None
+    ] = pydantic.Field(default=None)
+    """
+    This field has been deprecated.
+    """
+
     pitch_scale_minimum: typing_extensions.Annotated[
         typing.Optional[float], FieldMetadata(alias="pitchScaleMinimum")
-    ] = None
-    source_word_index: typing_extensions.Annotated[typing.Optional[int], FieldMetadata(alias="sourceWordIndex")] = None
+    ] = pydantic.Field(default=None)
+    """
+    This field has been deprecated.
+    """
+
+    source_word_index: typing_extensions.Annotated[typing.Optional[int], FieldMetadata(alias="sourceWordIndex")] = (
+        pydantic.Field(default=None)
+    )
+    """
+    This field has been deprecated.
+    """
+
     start_ms: typing_extensions.Annotated[typing.Optional[int], FieldMetadata(alias="startMs")] = None
     word: typing.Optional[str] = None
 
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+