murf 2.0.0__tar.gz → 2.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of murf might be problematic; see the registry's advisory page for more details.

Files changed (102)
  1. {murf-2.0.0 → murf-2.0.2}/PKG-INFO +1 -1
  2. {murf-2.0.0 → murf-2.0.2}/pyproject.toml +1 -1
  3. {murf-2.0.0 → murf-2.0.2}/src/murf/__init__.py +14 -8
  4. {murf-2.0.0 → murf-2.0.2}/src/murf/core/client_wrapper.py +1 -1
  5. {murf-2.0.0 → murf-2.0.2}/src/murf/core/http_client.py +6 -6
  6. {murf-2.0.0 → murf-2.0.2}/src/murf/core/pydantic_utilities.py +2 -2
  7. {murf-2.0.0 → murf-2.0.2}/src/murf/stream_input/types/send_message.py +2 -2
  8. {murf-2.0.0 → murf-2.0.2}/src/murf/text_to_speech/client.py +50 -4
  9. {murf-2.0.0 → murf-2.0.2}/src/murf/types/__init__.py +20 -10
  10. {murf-2.0.0 → murf-2.0.2}/src/murf/types/final_output.py +1 -1
  11. {murf-2.0.0 → murf-2.0.2}/src/murf/types/send_text.py +3 -0
  12. murf-2.0.2/src/murf/types/send_text_voice_config.py +55 -0
  13. murf-2.0.0/src/murf/types/set_voice_configuration_voice_config_pronunciation_dictionary_value.py → murf-2.0.2/src/murf/types/send_text_voice_config_pronunciation_dictionary_value.py +4 -4
  14. murf-2.0.2/src/murf/types/send_text_voice_config_pronunciation_dictionary_value_type.py +5 -0
  15. murf-2.0.2/src/murf/types/set_voice_configuration_or_initialize_context.py +26 -0
  16. murf-2.0.0/src/murf/types/set_voice_configuration_voice_config.py → murf-2.0.2/src/murf/types/set_voice_configuration_or_initialize_context_voice_config.py +4 -4
  17. murf-2.0.2/src/murf/types/set_voice_configuration_or_initialize_context_voice_config_pronunciation_dictionary_value.py +30 -0
  18. murf-2.0.0/src/murf/types/set_voice_configuration_voice_config_pronunciation_dictionary_value_type.py → murf-2.0.2/src/murf/types/set_voice_configuration_or_initialize_context_voice_config_pronunciation_dictionary_value_type.py +1 -1
  19. {murf-2.0.0 → murf-2.0.2}/src/murf/voice_changer/client.py +14 -10
  20. murf-2.0.0/src/murf/types/set_voice_configuration.py +0 -20
  21. {murf-2.0.0 → murf-2.0.2}/LICENSE +0 -0
  22. {murf-2.0.0 → murf-2.0.2}/README.md +0 -0
  23. {murf-2.0.0 → murf-2.0.2}/src/murf/auth/__init__.py +0 -0
  24. {murf-2.0.0 → murf-2.0.2}/src/murf/auth/client.py +0 -0
  25. {murf-2.0.0 → murf-2.0.2}/src/murf/base_client.py +0 -0
  26. {murf-2.0.0 → murf-2.0.2}/src/murf/client.py +0 -0
  27. {murf-2.0.0 → murf-2.0.2}/src/murf/core/__init__.py +0 -0
  28. {murf-2.0.0 → murf-2.0.2}/src/murf/core/api_error.py +0 -0
  29. {murf-2.0.0 → murf-2.0.2}/src/murf/core/datetime_utils.py +0 -0
  30. {murf-2.0.0 → murf-2.0.2}/src/murf/core/file.py +0 -0
  31. {murf-2.0.0 → murf-2.0.2}/src/murf/core/jsonable_encoder.py +0 -0
  32. {murf-2.0.0 → murf-2.0.2}/src/murf/core/query_encoder.py +0 -0
  33. {murf-2.0.0 → murf-2.0.2}/src/murf/core/remove_none_from_dict.py +0 -0
  34. {murf-2.0.0 → murf-2.0.2}/src/murf/core/request_options.py +0 -0
  35. {murf-2.0.0 → murf-2.0.2}/src/murf/core/serialization.py +0 -0
  36. {murf-2.0.0 → murf-2.0.2}/src/murf/core/unchecked_base_model.py +0 -0
  37. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/__init__.py +0 -0
  38. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/client.py +0 -0
  39. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/jobs/__init__.py +0 -0
  40. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/jobs/client.py +0 -0
  41. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/jobs/types/__init__.py +0 -0
  42. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/jobs/types/jobs_create_request_priority.py +0 -0
  43. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/jobs/types/jobs_create_with_project_id_request_priority.py +0 -0
  44. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/languages/__init__.py +0 -0
  45. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/languages/client.py +0 -0
  46. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/projects/__init__.py +0 -0
  47. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/projects/client.py +0 -0
  48. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/projects/types/__init__.py +0 -0
  49. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing/projects/types/api_create_project_request_dubbing_type.py +0 -0
  50. {murf-2.0.0 → murf-2.0.2}/src/murf/dubbing_client.py +0 -0
  51. {murf-2.0.0 → murf-2.0.2}/src/murf/environment.py +0 -0
  52. {murf-2.0.0 → murf-2.0.2}/src/murf/errors/__init__.py +0 -0
  53. {murf-2.0.0 → murf-2.0.2}/src/murf/errors/bad_request_error.py +0 -0
  54. {murf-2.0.0 → murf-2.0.2}/src/murf/errors/forbidden_error.py +0 -0
  55. {murf-2.0.0 → murf-2.0.2}/src/murf/errors/internal_server_error.py +0 -0
  56. {murf-2.0.0 → murf-2.0.2}/src/murf/errors/payment_required_error.py +0 -0
  57. {murf-2.0.0 → murf-2.0.2}/src/murf/errors/service_unavailable_error.py +0 -0
  58. {murf-2.0.0 → murf-2.0.2}/src/murf/errors/unauthorized_error.py +0 -0
  59. {murf-2.0.0 → murf-2.0.2}/src/murf/py.typed +0 -0
  60. {murf-2.0.0 → murf-2.0.2}/src/murf/stream_input/__init__.py +0 -0
  61. {murf-2.0.0 → murf-2.0.2}/src/murf/stream_input/types/__init__.py +0 -0
  62. {murf-2.0.0 → murf-2.0.2}/src/murf/stream_input/types/receive_message.py +0 -0
  63. {murf-2.0.0 → murf-2.0.2}/src/murf/text/__init__.py +0 -0
  64. {murf-2.0.0 → murf-2.0.2}/src/murf/text/client.py +0 -0
  65. {murf-2.0.0 → murf-2.0.2}/src/murf/text_to_speech/__init__.py +0 -0
  66. {murf-2.0.0 → murf-2.0.2}/src/murf/text_to_speech/types/__init__.py +0 -0
  67. {murf-2.0.0 → murf-2.0.2}/src/murf/text_to_speech/types/generate_speech_request_model_version.py +0 -0
  68. {murf-2.0.0 → murf-2.0.2}/src/murf/types/api_job_response.py +0 -0
  69. {murf-2.0.0 → murf-2.0.2}/src/murf/types/api_job_response_dubbing_type.py +0 -0
  70. {murf-2.0.0 → murf-2.0.2}/src/murf/types/api_job_response_priority.py +0 -0
  71. {murf-2.0.0 → murf-2.0.2}/src/murf/types/api_project_response.py +0 -0
  72. {murf-2.0.0 → murf-2.0.2}/src/murf/types/api_project_response_dubbing_type.py +0 -0
  73. {murf-2.0.0 → murf-2.0.2}/src/murf/types/api_voice.py +0 -0
  74. {murf-2.0.0 → murf-2.0.2}/src/murf/types/api_voice_gender.py +0 -0
  75. {murf-2.0.0 → murf-2.0.2}/src/murf/types/audio_output.py +0 -0
  76. {murf-2.0.0 → murf-2.0.2}/src/murf/types/auth_token_response.py +0 -0
  77. {murf-2.0.0 → murf-2.0.2}/src/murf/types/character_count.py +0 -0
  78. {murf-2.0.0 → murf-2.0.2}/src/murf/types/clear_context.py +0 -0
  79. {murf-2.0.0 → murf-2.0.2}/src/murf/types/dub_api_detail_response.py +0 -0
  80. {murf-2.0.0 → murf-2.0.2}/src/murf/types/dub_job_status_response.py +0 -0
  81. {murf-2.0.0 → murf-2.0.2}/src/murf/types/form_data_content_disposition.py +0 -0
  82. {murf-2.0.0 → murf-2.0.2}/src/murf/types/generate_speech_response.py +0 -0
  83. {murf-2.0.0 → murf-2.0.2}/src/murf/types/group_api_project_response.py +0 -0
  84. {murf-2.0.0 → murf-2.0.2}/src/murf/types/locale_response.py +0 -0
  85. {murf-2.0.0 → murf-2.0.2}/src/murf/types/locale_response_supports_item.py +0 -0
  86. {murf-2.0.0 → murf-2.0.2}/src/murf/types/metadata.py +0 -0
  87. {murf-2.0.0 → murf-2.0.2}/src/murf/types/murf_api_translation_response.py +0 -0
  88. {murf-2.0.0 → murf-2.0.2}/src/murf/types/pronunciation_detail.py +0 -0
  89. {murf-2.0.0 → murf-2.0.2}/src/murf/types/pronunciation_detail_type.py +0 -0
  90. {murf-2.0.0 → murf-2.0.2}/src/murf/types/set_advanced_settings.py +0 -0
  91. {murf-2.0.0 → murf-2.0.2}/src/murf/types/source_locale_response.py +0 -0
  92. {murf-2.0.0 → murf-2.0.2}/src/murf/types/speech_to_speech_response.py +0 -0
  93. {murf-2.0.0 → murf-2.0.2}/src/murf/types/style_details.py +0 -0
  94. {murf-2.0.0 → murf-2.0.2}/src/murf/types/translation.py +0 -0
  95. {murf-2.0.0 → murf-2.0.2}/src/murf/types/tts_request_both_payload.py +0 -0
  96. {murf-2.0.0 → murf-2.0.2}/src/murf/types/tts_request_both_payload_voice_config.py +0 -0
  97. {murf-2.0.0 → murf-2.0.2}/src/murf/types/tts_request_both_payload_voice_config_pronunciation_dictionary.py +0 -0
  98. {murf-2.0.0 → murf-2.0.2}/src/murf/types/tts_request_both_payload_voice_config_pronunciation_dictionary_guess.py +0 -0
  99. {murf-2.0.0 → murf-2.0.2}/src/murf/types/word_duration_response.py +0 -0
  100. {murf-2.0.0 → murf-2.0.2}/src/murf/utils.py +0 -0
  101. {murf-2.0.0 → murf-2.0.2}/src/murf/version.py +0 -0
  102. {murf-2.0.0 → murf-2.0.2}/src/murf/voice_changer/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: murf
3
- Version: 2.0.0
3
+ Version: 2.0.2
4
4
  Summary:
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Intended Audience :: Developers
@@ -3,7 +3,7 @@ name = "murf"
3
3
 
4
4
  [tool.poetry]
5
5
  name = "murf"
6
- version = "2.0.0"
6
+ version = "2.0.2"
7
7
  description = ""
8
8
  readme = "README.md"
9
9
  authors = []
@@ -25,11 +25,14 @@ from .types import (
25
25
  PronunciationDetail,
26
26
  PronunciationDetailType,
27
27
  SendText,
28
+ SendTextVoiceConfig,
29
+ SendTextVoiceConfigPronunciationDictionaryValue,
30
+ SendTextVoiceConfigPronunciationDictionaryValueType,
28
31
  SetAdvancedSettings,
29
- SetVoiceConfiguration,
30
- SetVoiceConfigurationVoiceConfig,
31
- SetVoiceConfigurationVoiceConfigPronunciationDictionaryValue,
32
- SetVoiceConfigurationVoiceConfigPronunciationDictionaryValueType,
32
+ SetVoiceConfigurationOrInitializeContext,
33
+ SetVoiceConfigurationOrInitializeContextVoiceConfig,
34
+ SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValue,
35
+ SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValueType,
33
36
  SourceLocaleResponse,
34
37
  SpeechToSpeechResponse,
35
38
  StyleDetails,
@@ -92,12 +95,15 @@ __all__ = [
92
95
  "ReceiveMessage",
93
96
  "SendMessage",
94
97
  "SendText",
98
+ "SendTextVoiceConfig",
99
+ "SendTextVoiceConfigPronunciationDictionaryValue",
100
+ "SendTextVoiceConfigPronunciationDictionaryValueType",
95
101
  "ServiceUnavailableError",
96
102
  "SetAdvancedSettings",
97
- "SetVoiceConfiguration",
98
- "SetVoiceConfigurationVoiceConfig",
99
- "SetVoiceConfigurationVoiceConfigPronunciationDictionaryValue",
100
- "SetVoiceConfigurationVoiceConfigPronunciationDictionaryValueType",
103
+ "SetVoiceConfigurationOrInitializeContext",
104
+ "SetVoiceConfigurationOrInitializeContextVoiceConfig",
105
+ "SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValue",
106
+ "SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValueType",
101
107
  "SourceLocaleResponse",
102
108
  "SpeechToSpeechResponse",
103
109
  "StyleDetails",
@@ -23,7 +23,7 @@ class BaseClientWrapper:
23
23
  headers: typing.Dict[str, str] = {
24
24
  "X-Fern-Language": "Python",
25
25
  "X-Fern-SDK-Name": "murf",
26
- "X-Fern-SDK-Version": "2.0.0",
26
+ "X-Fern-SDK-Version": "2.0.2",
27
27
  }
28
28
  if self._api_key is not None:
29
29
  headers["api-key"] = self._api_key
@@ -85,8 +85,8 @@ def _retry_timeout(response: httpx.Response, retries: int) -> float:
85
85
 
86
86
 
87
87
  def _should_retry(response: httpx.Response) -> bool:
88
- retriable_400s = [429, 408, 409]
89
- return response.status_code >= 500 or response.status_code in retriable_400s
88
+ retryable_400s = [429, 408, 409]
89
+ return response.status_code >= 500 or response.status_code in retryable_400s
90
90
 
91
91
 
92
92
  def remove_omit_from_dict(
@@ -183,7 +183,7 @@ class HttpClient:
183
183
  files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None,
184
184
  headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
185
185
  request_options: typing.Optional[RequestOptions] = None,
186
- retries: int = 0,
186
+ retries: int = 2,
187
187
  omit: typing.Optional[typing.Any] = None,
188
188
  ) -> httpx.Response:
189
189
  base_url = self.get_base_url(base_url)
@@ -269,7 +269,7 @@ class HttpClient:
269
269
  files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None,
270
270
  headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
271
271
  request_options: typing.Optional[RequestOptions] = None,
272
- retries: int = 0,
272
+ retries: int = 2,
273
273
  omit: typing.Optional[typing.Any] = None,
274
274
  ) -> typing.Iterator[httpx.Response]:
275
275
  base_url = self.get_base_url(base_url)
@@ -359,7 +359,7 @@ class AsyncHttpClient:
359
359
  files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None,
360
360
  headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
361
361
  request_options: typing.Optional[RequestOptions] = None,
362
- retries: int = 0,
362
+ retries: int = 2,
363
363
  omit: typing.Optional[typing.Any] = None,
364
364
  ) -> httpx.Response:
365
365
  base_url = self.get_base_url(base_url)
@@ -445,7 +445,7 @@ class AsyncHttpClient:
445
445
  files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None,
446
446
  headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
447
447
  request_options: typing.Optional[RequestOptions] = None,
448
- retries: int = 0,
448
+ retries: int = 2,
449
449
  omit: typing.Optional[typing.Any] = None,
450
450
  ) -> typing.AsyncIterator[httpx.Response]:
451
451
  base_url = self.get_base_url(base_url)
@@ -79,7 +79,7 @@ def to_jsonable_with_fallback(
79
79
  class UniversalBaseModel(pydantic.BaseModel):
80
80
  if IS_PYDANTIC_V2:
81
81
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
82
- # Allow fields begining with `model_` to be used in the model
82
+ # Allow fields beginning with `model_` to be used in the model
83
83
  protected_namespaces=(),
84
84
  ) # type: ignore # Pydantic v2
85
85
 
@@ -128,7 +128,7 @@ class UniversalBaseModel(pydantic.BaseModel):
128
128
  Override the default dict method to `exclude_unset` by default. This function patches
129
129
  `exclude_unset` to work include fields within non-None default values.
130
130
  """
131
- # Note: the logic here is multi-plexed given the levers exposed in Pydantic V1 vs V2
131
+ # Note: the logic here is multiplexed given the levers exposed in Pydantic V1 vs V2
132
132
  # Pydantic V1's .dict can be extremely slow, so we do not want to call it twice.
133
133
  #
134
134
  # We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models
@@ -1,9 +1,9 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
3
  import typing
4
- from ...types.set_voice_configuration import SetVoiceConfiguration
4
+ from ...types.set_voice_configuration_or_initialize_context import SetVoiceConfigurationOrInitializeContext
5
5
  from ...types.send_text import SendText
6
6
  from ...types.set_advanced_settings import SetAdvancedSettings
7
7
  from ...types.clear_context import ClearContext
8
8
 
9
- SendMessage = typing.Union[SetVoiceConfiguration, SendText, SetAdvancedSettings, ClearContext]
9
+ SendMessage = typing.Union[SetVoiceConfigurationOrInitializeContext, SendText, SetAdvancedSettings, ClearContext]
@@ -34,6 +34,7 @@ class TextToSpeechClient:
34
34
  audio_duration: typing.Optional[float] = OMIT,
35
35
  channel_type: typing.Optional[str] = OMIT,
36
36
  encode_as_base_64: typing.Optional[bool] = OMIT,
37
+ encoded_as_base_64_with_zero_retention: typing.Optional[bool] = OMIT,
37
38
  format: typing.Optional[str] = OMIT,
38
39
  model_version: typing.Optional[GenerateSpeechRequestModelVersion] = OMIT,
39
40
  multi_native_locale: typing.Optional[str] = OMIT,
@@ -43,6 +44,7 @@ class TextToSpeechClient:
43
44
  sample_rate: typing.Optional[float] = OMIT,
44
45
  style: typing.Optional[str] = OMIT,
45
46
  variation: typing.Optional[int] = OMIT,
47
+ word_durations_as_original_text: typing.Optional[bool] = OMIT,
46
48
  request_options: typing.Optional[RequestOptions] = None,
47
49
  ) -> GenerateSpeechResponse:
48
50
  """
@@ -65,8 +67,11 @@ class TextToSpeechClient:
65
67
  encode_as_base_64 : typing.Optional[bool]
66
68
  Set to true to receive audio in response as a Base64 encoded string instead of a url.
67
69
 
70
+ encoded_as_base_64_with_zero_retention : typing.Optional[bool]
71
+ Set to true to receive audio in response as a Base64 encoded string with zero data retention
72
+
68
73
  format : typing.Optional[str]
69
- Format of the generated audio file. Valid values: MP3, WAV, FLAC, ALAW, ULAW
74
+ Format of the generated audio file. Valid values: MP3, WAV, FLAC, ALAW, ULAW, PCM, OGG
70
75
 
71
76
  model_version : typing.Optional[GenerateSpeechRequestModelVersion]
72
77
  Valid values: GEN1, GEN2. Use GEN2 to generate audio using new and advanced model. Outputs from Gen 2 will sound better, but different from the old model
@@ -97,6 +102,9 @@ class TextToSpeechClient:
97
102
  variation : typing.Optional[int]
98
103
  Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
99
104
 
105
+ word_durations_as_original_text : typing.Optional[bool]
106
+ If set to true, the word durations in response will return words as the original input text. (English only)
107
+
100
108
  request_options : typing.Optional[RequestOptions]
101
109
  Request-specific configuration.
102
110
 
@@ -125,6 +133,7 @@ class TextToSpeechClient:
125
133
  "audioDuration": audio_duration,
126
134
  "channelType": channel_type,
127
135
  "encodeAsBase64": encode_as_base_64,
136
+ "encodedAsBase64WithZeroRetention": encoded_as_base_64_with_zero_retention,
128
137
  "format": format,
129
138
  "modelVersion": model_version,
130
139
  "multiNativeLocale": multi_native_locale,
@@ -140,6 +149,7 @@ class TextToSpeechClient:
140
149
  "text": text,
141
150
  "variation": variation,
142
151
  "voiceId": voice_id,
152
+ "wordDurationsAsOriginalText": word_durations_as_original_text,
143
153
  },
144
154
  headers={
145
155
  "content-type": "application/json",
@@ -220,6 +230,7 @@ class TextToSpeechClient:
220
230
  format: typing.Optional[str] = OMIT,
221
231
  multi_native_locale: typing.Optional[str] = OMIT,
222
232
  pitch: typing.Optional[int] = OMIT,
233
+ pronunciation_dictionary: typing.Optional[typing.Dict[str, PronunciationDetail]] = OMIT,
223
234
  rate: typing.Optional[int] = OMIT,
224
235
  sample_rate: typing.Optional[float] = OMIT,
225
236
  style: typing.Optional[str] = OMIT,
@@ -241,7 +252,7 @@ class TextToSpeechClient:
241
252
  Valid values: STEREO, MONO
242
253
 
243
254
  format : typing.Optional[str]
244
- Format of the generated audio file. Valid values: MP3, WAV
255
+ Format of the generated audio file. Valid values: MP3, WAV, PCM
245
256
 
246
257
  multi_native_locale : typing.Optional[str]
247
258
  Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
@@ -250,6 +261,13 @@ class TextToSpeechClient:
250
261
  pitch : typing.Optional[int]
251
262
  Pitch of the voiceover
252
263
 
264
+ pronunciation_dictionary : typing.Optional[typing.Dict[str, PronunciationDetail]]
265
+ An object used to define custom pronunciations.
266
+
267
+ Example 1: {"live":{"type": "IPA", "pronunciation": "laɪv"}}.
268
+
269
+ Example 2: {"2022":{"type": "SAY_AS", "pronunciation": "twenty twenty two"}}
270
+
253
271
  rate : typing.Optional[int]
254
272
  Speed of the voiceover
255
273
 
@@ -288,6 +306,11 @@ class TextToSpeechClient:
288
306
  "format": format,
289
307
  "multiNativeLocale": multi_native_locale,
290
308
  "pitch": pitch,
309
+ "pronunciationDictionary": convert_and_respect_annotation_metadata(
310
+ object_=pronunciation_dictionary,
311
+ annotation=typing.Dict[str, PronunciationDetail],
312
+ direction="write",
313
+ ),
291
314
  "rate": rate,
292
315
  "sampleRate": sample_rate,
293
316
  "style": style,
@@ -466,6 +489,7 @@ class AsyncTextToSpeechClient:
466
489
  audio_duration: typing.Optional[float] = OMIT,
467
490
  channel_type: typing.Optional[str] = OMIT,
468
491
  encode_as_base_64: typing.Optional[bool] = OMIT,
492
+ encoded_as_base_64_with_zero_retention: typing.Optional[bool] = OMIT,
469
493
  format: typing.Optional[str] = OMIT,
470
494
  model_version: typing.Optional[GenerateSpeechRequestModelVersion] = OMIT,
471
495
  multi_native_locale: typing.Optional[str] = OMIT,
@@ -475,6 +499,7 @@ class AsyncTextToSpeechClient:
475
499
  sample_rate: typing.Optional[float] = OMIT,
476
500
  style: typing.Optional[str] = OMIT,
477
501
  variation: typing.Optional[int] = OMIT,
502
+ word_durations_as_original_text: typing.Optional[bool] = OMIT,
478
503
  request_options: typing.Optional[RequestOptions] = None,
479
504
  ) -> GenerateSpeechResponse:
480
505
  """
@@ -497,8 +522,11 @@ class AsyncTextToSpeechClient:
497
522
  encode_as_base_64 : typing.Optional[bool]
498
523
  Set to true to receive audio in response as a Base64 encoded string instead of a url.
499
524
 
525
+ encoded_as_base_64_with_zero_retention : typing.Optional[bool]
526
+ Set to true to receive audio in response as a Base64 encoded string with zero data retention
527
+
500
528
  format : typing.Optional[str]
501
- Format of the generated audio file. Valid values: MP3, WAV, FLAC, ALAW, ULAW
529
+ Format of the generated audio file. Valid values: MP3, WAV, FLAC, ALAW, ULAW, PCM, OGG
502
530
 
503
531
  model_version : typing.Optional[GenerateSpeechRequestModelVersion]
504
532
  Valid values: GEN1, GEN2. Use GEN2 to generate audio using new and advanced model. Outputs from Gen 2 will sound better, but different from the old model
@@ -529,6 +557,9 @@ class AsyncTextToSpeechClient:
529
557
  variation : typing.Optional[int]
530
558
  Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
531
559
 
560
+ word_durations_as_original_text : typing.Optional[bool]
561
+ If set to true, the word durations in response will return words as the original input text. (English only)
562
+
532
563
  request_options : typing.Optional[RequestOptions]
533
564
  Request-specific configuration.
534
565
 
@@ -565,6 +596,7 @@ class AsyncTextToSpeechClient:
565
596
  "audioDuration": audio_duration,
566
597
  "channelType": channel_type,
567
598
  "encodeAsBase64": encode_as_base_64,
599
+ "encodedAsBase64WithZeroRetention": encoded_as_base_64_with_zero_retention,
568
600
  "format": format,
569
601
  "modelVersion": model_version,
570
602
  "multiNativeLocale": multi_native_locale,
@@ -580,6 +612,7 @@ class AsyncTextToSpeechClient:
580
612
  "text": text,
581
613
  "variation": variation,
582
614
  "voiceId": voice_id,
615
+ "wordDurationsAsOriginalText": word_durations_as_original_text,
583
616
  },
584
617
  headers={
585
618
  "content-type": "application/json",
@@ -660,6 +693,7 @@ class AsyncTextToSpeechClient:
660
693
  format: typing.Optional[str] = OMIT,
661
694
  multi_native_locale: typing.Optional[str] = OMIT,
662
695
  pitch: typing.Optional[int] = OMIT,
696
+ pronunciation_dictionary: typing.Optional[typing.Dict[str, PronunciationDetail]] = OMIT,
663
697
  rate: typing.Optional[int] = OMIT,
664
698
  sample_rate: typing.Optional[float] = OMIT,
665
699
  style: typing.Optional[str] = OMIT,
@@ -681,7 +715,7 @@ class AsyncTextToSpeechClient:
681
715
  Valid values: STEREO, MONO
682
716
 
683
717
  format : typing.Optional[str]
684
- Format of the generated audio file. Valid values: MP3, WAV
718
+ Format of the generated audio file. Valid values: MP3, WAV, PCM
685
719
 
686
720
  multi_native_locale : typing.Optional[str]
687
721
  Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
@@ -690,6 +724,13 @@ class AsyncTextToSpeechClient:
690
724
  pitch : typing.Optional[int]
691
725
  Pitch of the voiceover
692
726
 
727
+ pronunciation_dictionary : typing.Optional[typing.Dict[str, PronunciationDetail]]
728
+ An object used to define custom pronunciations.
729
+
730
+ Example 1: {"live":{"type": "IPA", "pronunciation": "laɪv"}}.
731
+
732
+ Example 2: {"2022":{"type": "SAY_AS", "pronunciation": "twenty twenty two"}}
733
+
693
734
  rate : typing.Optional[int]
694
735
  Speed of the voiceover
695
736
 
@@ -736,6 +777,11 @@ class AsyncTextToSpeechClient:
736
777
  "format": format,
737
778
  "multiNativeLocale": multi_native_locale,
738
779
  "pitch": pitch,
780
+ "pronunciationDictionary": convert_and_respect_annotation_metadata(
781
+ object_=pronunciation_dictionary,
782
+ annotation=typing.Dict[str, PronunciationDetail],
783
+ direction="write",
784
+ ),
739
785
  "rate": rate,
740
786
  "sampleRate": sample_rate,
741
787
  "style": style,
@@ -24,14 +24,21 @@ from .murf_api_translation_response import MurfApiTranslationResponse
24
24
  from .pronunciation_detail import PronunciationDetail
25
25
  from .pronunciation_detail_type import PronunciationDetailType
26
26
  from .send_text import SendText
27
+ from .send_text_voice_config import SendTextVoiceConfig
28
+ from .send_text_voice_config_pronunciation_dictionary_value import SendTextVoiceConfigPronunciationDictionaryValue
29
+ from .send_text_voice_config_pronunciation_dictionary_value_type import (
30
+ SendTextVoiceConfigPronunciationDictionaryValueType,
31
+ )
27
32
  from .set_advanced_settings import SetAdvancedSettings
28
- from .set_voice_configuration import SetVoiceConfiguration
29
- from .set_voice_configuration_voice_config import SetVoiceConfigurationVoiceConfig
30
- from .set_voice_configuration_voice_config_pronunciation_dictionary_value import (
31
- SetVoiceConfigurationVoiceConfigPronunciationDictionaryValue,
33
+ from .set_voice_configuration_or_initialize_context import SetVoiceConfigurationOrInitializeContext
34
+ from .set_voice_configuration_or_initialize_context_voice_config import (
35
+ SetVoiceConfigurationOrInitializeContextVoiceConfig,
36
+ )
37
+ from .set_voice_configuration_or_initialize_context_voice_config_pronunciation_dictionary_value import (
38
+ SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValue,
32
39
  )
33
- from .set_voice_configuration_voice_config_pronunciation_dictionary_value_type import (
34
- SetVoiceConfigurationVoiceConfigPronunciationDictionaryValueType,
40
+ from .set_voice_configuration_or_initialize_context_voice_config_pronunciation_dictionary_value_type import (
41
+ SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValueType,
35
42
  )
36
43
  from .source_locale_response import SourceLocaleResponse
37
44
  from .speech_to_speech_response import SpeechToSpeechResponse
@@ -72,11 +79,14 @@ __all__ = [
72
79
  "PronunciationDetail",
73
80
  "PronunciationDetailType",
74
81
  "SendText",
82
+ "SendTextVoiceConfig",
83
+ "SendTextVoiceConfigPronunciationDictionaryValue",
84
+ "SendTextVoiceConfigPronunciationDictionaryValueType",
75
85
  "SetAdvancedSettings",
76
- "SetVoiceConfiguration",
77
- "SetVoiceConfigurationVoiceConfig",
78
- "SetVoiceConfigurationVoiceConfigPronunciationDictionaryValue",
79
- "SetVoiceConfigurationVoiceConfigPronunciationDictionaryValueType",
86
+ "SetVoiceConfigurationOrInitializeContext",
87
+ "SetVoiceConfigurationOrInitializeContextVoiceConfig",
88
+ "SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValue",
89
+ "SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValueType",
80
90
  "SourceLocaleResponse",
81
91
  "SpeechToSpeechResponse",
82
92
  "StyleDetails",
@@ -7,7 +7,7 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2
7
7
 
8
8
 
9
9
  class FinalOutput(UncheckedBaseModel):
10
- is_final: bool = pydantic.Field()
10
+ final: bool = pydantic.Field()
11
11
  """
12
12
  Indicates if the audio is final, you will not receive audio param in this message
13
13
  """
@@ -3,6 +3,7 @@
3
3
  from ..core.unchecked_base_model import UncheckedBaseModel
4
4
  import pydantic
5
5
  import typing
6
+ from .send_text_voice_config import SendTextVoiceConfig
6
7
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
7
8
 
8
9
 
@@ -22,6 +23,8 @@ class SendText(UncheckedBaseModel):
22
23
  Indicates if this is the end of the text stream (optional)
23
24
  """
24
25
 
26
+ voice_config: typing.Optional[SendTextVoiceConfig] = None
27
+
25
28
  if IS_PYDANTIC_V2:
26
29
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
27
30
  else:
@@ -0,0 +1,55 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from ..core.unchecked_base_model import UncheckedBaseModel
4
+ import typing
5
+ import pydantic
6
+ from .send_text_voice_config_pronunciation_dictionary_value import SendTextVoiceConfigPronunciationDictionaryValue
7
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
8
+
9
+
10
+ class SendTextVoiceConfig(UncheckedBaseModel):
11
+ voice_id: typing.Optional[str] = pydantic.Field(default=None)
12
+ """
13
+ Voice ID to use for TTS. Defaults to "en-US-daniel"
14
+ """
15
+
16
+ style: typing.Optional[str] = pydantic.Field(default=None)
17
+ """
18
+ The style of speech (optional)
19
+ """
20
+
21
+ rate: typing.Optional[int] = pydantic.Field(default=None)
22
+ """
23
+ Speech rate (optional)
24
+ """
25
+
26
+ pitch: typing.Optional[int] = pydantic.Field(default=None)
27
+ """
28
+ Speech pitch (optional)
29
+ """
30
+
31
+ pronunciation_dictionary: typing.Optional[typing.Dict[str, SendTextVoiceConfigPronunciationDictionaryValue]] = (
32
+ pydantic.Field(default=None)
33
+ )
34
+ """
35
+ A map of words to their pronunciation details. Example 1: {"live":{"type": "IPA", "pronunciation": "laɪv"}}.
36
+ """
37
+
38
+ variation: typing.Optional[int] = pydantic.Field(default=None)
39
+ """
40
+ Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
41
+ """
42
+
43
+ multi_native_locale: typing.Optional[str] = pydantic.Field(default=None)
44
+ """
45
+ Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model. Valid values: "en-US", "en-UK", "es-ES", etc. Use the GET /v1/speed/voices endpoint to retrieve the list of available voices and languages.
46
+ """
47
+
48
+ if IS_PYDANTIC_V2:
49
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
50
+ else:
51
+
52
+ class Config:
53
+ frozen = True
54
+ smart_union = True
55
+ extra = pydantic.Extra.allow
@@ -1,16 +1,16 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
3
  from ..core.unchecked_base_model import UncheckedBaseModel
4
- from .set_voice_configuration_voice_config_pronunciation_dictionary_value_type import (
5
- SetVoiceConfigurationVoiceConfigPronunciationDictionaryValueType,
4
+ from .send_text_voice_config_pronunciation_dictionary_value_type import (
5
+ SendTextVoiceConfigPronunciationDictionaryValueType,
6
6
  )
7
7
  import pydantic
8
8
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
9
9
  import typing
10
10
 
11
11
 
12
- class SetVoiceConfigurationVoiceConfigPronunciationDictionaryValue(UncheckedBaseModel):
13
- type: SetVoiceConfigurationVoiceConfigPronunciationDictionaryValueType = pydantic.Field()
12
+ class SendTextVoiceConfigPronunciationDictionaryValue(UncheckedBaseModel):
13
+ type: SendTextVoiceConfigPronunciationDictionaryValueType = pydantic.Field()
14
14
  """
15
15
  Type of pronunciation (IPA or SAY_AS)
16
16
  """
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ SendTextVoiceConfigPronunciationDictionaryValueType = typing.Union[typing.Literal["IPA", "SAY_AS"], typing.Any]
@@ -0,0 +1,26 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from ..core.unchecked_base_model import UncheckedBaseModel
4
+ from .set_voice_configuration_or_initialize_context_voice_config import (
5
+ SetVoiceConfigurationOrInitializeContextVoiceConfig,
6
+ )
7
+ import typing
8
+ import pydantic
9
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
10
+
11
+
12
+ class SetVoiceConfigurationOrInitializeContext(UncheckedBaseModel):
13
+ voice_config: SetVoiceConfigurationOrInitializeContextVoiceConfig
14
+ context_id: typing.Optional[str] = pydantic.Field(default=None)
15
+ """
16
+ Optional context identifier
17
+ """
18
+
19
+ if IS_PYDANTIC_V2:
20
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
21
+ else:
22
+
23
+ class Config:
24
+ frozen = True
25
+ smart_union = True
26
+ extra = pydantic.Extra.allow
@@ -3,13 +3,13 @@
3
3
  from ..core.unchecked_base_model import UncheckedBaseModel
4
4
  import typing
5
5
  import pydantic
6
- from .set_voice_configuration_voice_config_pronunciation_dictionary_value import (
7
- SetVoiceConfigurationVoiceConfigPronunciationDictionaryValue,
6
+ from .set_voice_configuration_or_initialize_context_voice_config_pronunciation_dictionary_value import (
7
+ SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValue,
8
8
  )
9
9
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
10
10
 
11
11
 
12
- class SetVoiceConfigurationVoiceConfig(UncheckedBaseModel):
12
+ class SetVoiceConfigurationOrInitializeContextVoiceConfig(UncheckedBaseModel):
13
13
  voice_id: typing.Optional[str] = pydantic.Field(default=None)
14
14
  """
15
15
  Voice ID to use for TTS. Defaults to "en-US-daniel"
@@ -31,7 +31,7 @@ class SetVoiceConfigurationVoiceConfig(UncheckedBaseModel):
31
31
  """
32
32
 
33
33
  pronunciation_dictionary: typing.Optional[
34
- typing.Dict[str, SetVoiceConfigurationVoiceConfigPronunciationDictionaryValue]
34
+ typing.Dict[str, SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValue]
35
35
  ] = pydantic.Field(default=None)
36
36
  """
37
37
  A map of words to their pronunciation details. Example 1: {"live":{"type": "IPA", "pronunciation": "laɪv"}}.
@@ -0,0 +1,30 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from ..core.unchecked_base_model import UncheckedBaseModel
4
+ from .set_voice_configuration_or_initialize_context_voice_config_pronunciation_dictionary_value_type import (
5
+ SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValueType,
6
+ )
7
+ import pydantic
8
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
9
+ import typing
10
+
11
+
12
+ class SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValue(UncheckedBaseModel):
13
+ type: SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValueType = pydantic.Field()
14
+ """
15
+ Type of pronunciation (IPA or SAY_AS)
16
+ """
17
+
18
+ pronunciation: str = pydantic.Field()
19
+ """
20
+ The actual pronunciation string
21
+ """
22
+
23
+ if IS_PYDANTIC_V2:
24
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
25
+ else:
26
+
27
+ class Config:
28
+ frozen = True
29
+ smart_union = True
30
+ extra = pydantic.Extra.allow
@@ -2,6 +2,6 @@
2
2
 
3
3
  import typing
4
4
 
5
- SetVoiceConfigurationVoiceConfigPronunciationDictionaryValueType = typing.Union[
5
+ SetVoiceConfigurationOrInitializeContextVoiceConfigPronunciationDictionaryValueType = typing.Union[
6
6
  typing.Literal["IPA", "SAY_AS"], typing.Any
7
7
  ]
@@ -26,6 +26,7 @@ class VoiceChangerClient:
26
26
  def convert(
27
27
  self,
28
28
  *,
29
+ voice_id: str,
29
30
  audio_duration: typing.Optional[float] = OMIT,
30
31
  channel_type: typing.Optional[str] = OMIT,
31
32
  encode_output_as_base_64: typing.Optional[bool] = OMIT,
@@ -43,7 +44,6 @@ class VoiceChangerClient:
43
44
  style: typing.Optional[str] = OMIT,
44
45
  transcription: typing.Optional[str] = OMIT,
45
46
  variation: typing.Optional[int] = OMIT,
46
- voice_id: typing.Optional[str] = OMIT,
47
47
  request_options: typing.Optional[RequestOptions] = None,
48
48
  ) -> SpeechToSpeechResponse:
49
49
  """
@@ -51,6 +51,9 @@ class VoiceChangerClient:
51
51
 
52
52
  Parameters
53
53
  ----------
54
+ voice_id : str
55
+ Use the GET /v1/speech/voices API to find supported voiceIds. You can use either the voiceId (e.g. en-US-natalie) or just the voice actor's name (e.g. natalie).
56
+
54
57
  audio_duration : typing.Optional[float]
55
58
  This parameter allows specifying the duration (in seconds) for the generated audio. If the value is 0, this parameter will be ignored. Only available for Gen2 model.
56
59
 
@@ -108,9 +111,6 @@ class VoiceChangerClient:
108
111
  variation : typing.Optional[int]
109
112
  Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
110
113
 
111
- voice_id : typing.Optional[str]
112
- Use the GET /v1/speech/voices API to find supported voiceIds. You can use either the voiceId (e.g. en-US-natalie) or just the voice actor's name (e.g. natalie).
113
-
114
114
  request_options : typing.Optional[RequestOptions]
115
115
  Request-specific configuration.
116
116
 
@@ -126,7 +126,9 @@ class VoiceChangerClient:
126
126
  client = Murf(
127
127
  api_key="YOUR_API_KEY",
128
128
  )
129
- client.voice_changer.convert()
129
+ client.voice_changer.convert(
130
+ voice_id="voice_id",
131
+ )
130
132
  """
131
133
  _response = self._client_wrapper.httpx_client.request(
132
134
  "v1/voice-changer/convert",
@@ -229,6 +231,7 @@ class AsyncVoiceChangerClient:
229
231
  async def convert(
230
232
  self,
231
233
  *,
234
+ voice_id: str,
232
235
  audio_duration: typing.Optional[float] = OMIT,
233
236
  channel_type: typing.Optional[str] = OMIT,
234
237
  encode_output_as_base_64: typing.Optional[bool] = OMIT,
@@ -246,7 +249,6 @@ class AsyncVoiceChangerClient:
246
249
  style: typing.Optional[str] = OMIT,
247
250
  transcription: typing.Optional[str] = OMIT,
248
251
  variation: typing.Optional[int] = OMIT,
249
- voice_id: typing.Optional[str] = OMIT,
250
252
  request_options: typing.Optional[RequestOptions] = None,
251
253
  ) -> SpeechToSpeechResponse:
252
254
  """
@@ -254,6 +256,9 @@ class AsyncVoiceChangerClient:
254
256
 
255
257
  Parameters
256
258
  ----------
259
+ voice_id : str
260
+ Use the GET /v1/speech/voices API to find supported voiceIds. You can use either the voiceId (e.g. en-US-natalie) or just the voice actor's name (e.g. natalie).
261
+
257
262
  audio_duration : typing.Optional[float]
258
263
  This parameter allows specifying the duration (in seconds) for the generated audio. If the value is 0, this parameter will be ignored. Only available for Gen2 model.
259
264
 
@@ -311,9 +316,6 @@ class AsyncVoiceChangerClient:
311
316
  variation : typing.Optional[int]
312
317
  Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
313
318
 
314
- voice_id : typing.Optional[str]
315
- Use the GET /v1/speech/voices API to find supported voiceIds. You can use either the voiceId (e.g. en-US-natalie) or just the voice actor's name (e.g. natalie).
316
-
317
319
  request_options : typing.Optional[RequestOptions]
318
320
  Request-specific configuration.
319
321
 
@@ -334,7 +336,9 @@ class AsyncVoiceChangerClient:
334
336
 
335
337
 
336
338
  async def main() -> None:
337
- await client.voice_changer.convert()
339
+ await client.voice_changer.convert(
340
+ voice_id="voice_id",
341
+ )
338
342
 
339
343
 
340
344
  asyncio.run(main())
@@ -1,20 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- from ..core.unchecked_base_model import UncheckedBaseModel
4
- from .set_voice_configuration_voice_config import SetVoiceConfigurationVoiceConfig
5
- from ..core.pydantic_utilities import IS_PYDANTIC_V2
6
- import typing
7
- import pydantic
8
-
9
-
10
- class SetVoiceConfiguration(UncheckedBaseModel):
11
- voice_config: SetVoiceConfigurationVoiceConfig
12
-
13
- if IS_PYDANTIC_V2:
14
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
15
- else:
16
-
17
- class Config:
18
- frozen = True
19
- smart_union = True
20
- extra = pydantic.Extra.allow
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes