murf 1.2.1.tar.gz → 1.2.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of murf might be problematic.
- {murf-1.2.1 → murf-1.2.2}/PKG-INFO +1 -1
- {murf-1.2.1 → murf-1.2.2}/pyproject.toml +1 -1
- {murf-1.2.1 → murf-1.2.2}/src/murf/__init__.py +3 -3
- {murf-1.2.1 → murf-1.2.2}/src/murf/base_client.py +8 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/client_wrapper.py +1 -1
- murf-1.2.2/src/murf/text/__init__.py +2 -0
- murf-1.2.2/src/murf/text/client.py +262 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/text_to_speech/client.py +26 -8
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/__init__.py +0 -2
- murf-1.2.2/src/murf/voice_changer/__init__.py +2 -0
- murf-1.2.2/src/murf/voice_changer/client.py +431 -0
- murf-1.2.1/src/murf/types/murf_api_translation_request.py +0 -29
- {murf-1.2.1 → murf-1.2.2}/LICENSE +0 -0
- {murf-1.2.1 → murf-1.2.2}/README.md +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/auth/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/auth/client.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/client.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/api_error.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/datetime_utils.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/file.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/http_client.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/jsonable_encoder.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/pydantic_utilities.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/query_encoder.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/remove_none_from_dict.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/request_options.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/core/serialization.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/client.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/jobs/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/jobs/client.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/jobs/types/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/jobs/types/jobs_create_request_priority.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/jobs/types/jobs_create_with_project_id_request_priority.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/languages/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/languages/client.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/projects/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/projects/client.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/projects/types/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing/projects/types/api_create_project_request_dubbing_type.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/dubbing_client.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/environment.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/errors/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/errors/bad_request_error.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/errors/forbidden_error.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/errors/internal_server_error.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/errors/payment_required_error.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/errors/service_unavailable_error.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/errors/unauthorized_error.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/py.typed +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/text_to_speech/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/text_to_speech/types/__init__.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/text_to_speech/types/generate_speech_request_model_version.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/api_job_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/api_job_response_dubbing_type.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/api_job_response_priority.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/api_project_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/api_project_response_dubbing_type.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/api_voice.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/api_voice_gender.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/auth_token_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/character_count.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/dub_api_detail_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/dub_job_status_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/form_data_content_disposition.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/generate_speech_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/group_api_project_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/locale_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/locale_response_supports_item.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/metadata.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/murf_api_translation_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/pronunciation_detail.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/pronunciation_detail_type.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/source_locale_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/speech_to_speech_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/style_details.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/translation.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/types/word_duration_response.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/utils.py +0 -0
- {murf-1.2.1 → murf-1.2.2}/src/murf/version.py +0 -0
{murf-1.2.1 → murf-1.2.2}/src/murf/__init__.py

@@ -18,7 +18,6 @@ from .types import (
     LocaleResponse,
     LocaleResponseSupportsItem,
     Metadata,
-    MurfApiTranslationRequest,
     MurfApiTranslationResponse,
     PronunciationDetail,
     PronunciationDetailType,
@@ -36,7 +35,7 @@ from .errors import (
     ServiceUnavailableError,
     UnauthorizedError,
 )
-from . import auth, dubbing, text_to_speech
+from . import auth, dubbing, text, text_to_speech, voice_changer
 from .client import AsyncMurf, Murf
 from .dubbing_client import MurfDub
 from .environment import MurfEnvironment
@@ -67,7 +66,6 @@ __all__ = [
     "LocaleResponseSupportsItem",
     "Metadata",
     "Murf",
-    "MurfApiTranslationRequest",
     "MurfApiTranslationResponse",
     "MurfDub",
     "MurfEnvironment",
@@ -84,5 +82,7 @@ __all__ = [
     "__version__",
     "auth",
     "dubbing",
+    "text",
     "text_to_speech",
+    "voice_changer",
 ]
{murf-1.2.1 → murf-1.2.2}/src/murf/base_client.py

@@ -6,10 +6,14 @@ import httpx
 from .core.client_wrapper import SyncClientWrapper
 from .auth.client import AuthClient
 from .text_to_speech.client import TextToSpeechClient
+from .text.client import TextClient
+from .voice_changer.client import VoiceChangerClient
 from .dubbing.client import DubbingClient
 from .core.client_wrapper import AsyncClientWrapper
 from .auth.client import AsyncAuthClient
 from .text_to_speech.client import AsyncTextToSpeechClient
+from .text.client import AsyncTextClient
+from .voice_changer.client import AsyncVoiceChangerClient
 from .dubbing.client import AsyncDubbingClient
 
 
@@ -73,6 +77,8 @@ class BaseClient:
         )
         self.auth = AuthClient(client_wrapper=self._client_wrapper)
         self.text_to_speech = TextToSpeechClient(client_wrapper=self._client_wrapper)
+        self.text = TextClient(client_wrapper=self._client_wrapper)
+        self.voice_changer = VoiceChangerClient(client_wrapper=self._client_wrapper)
         self.dubbing = DubbingClient(client_wrapper=self._client_wrapper)
 
 
@@ -136,6 +142,8 @@ class AsyncBaseClient:
         )
         self.auth = AsyncAuthClient(client_wrapper=self._client_wrapper)
         self.text_to_speech = AsyncTextToSpeechClient(client_wrapper=self._client_wrapper)
+        self.text = AsyncTextClient(client_wrapper=self._client_wrapper)
+        self.voice_changer = AsyncVoiceChangerClient(client_wrapper=self._client_wrapper)
         self.dubbing = AsyncDubbingClient(client_wrapper=self._client_wrapper)
 
 
{murf-1.2.1 → murf-1.2.2}/src/murf/core/client_wrapper.py

@@ -16,7 +16,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "murf",
-            "X-Fern-SDK-Version": "1.2.1",
+            "X-Fern-SDK-Version": "1.2.2",
         }
         if self._api_key is not None:
             headers["api-key"] = self._api_key
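The two new sub-clients are wired into both the sync and async base clients above, so after upgrading they should be reachable directly from the top-level Murf client. A minimal sketch of that surface, based only on the constructor assignments in these hunks and the docstring examples that appear later in this diff (the API key value is a placeholder):

    from murf import Murf

    client = Murf(api_key="YOUR_API_KEY")  # the api-key header is set by the client wrapper shown above

    # New namespaces in 1.2.2, mirroring the new constructor assignments:
    translation = client.text.translate(
        target_language="es-ES",
        texts=["Hello, world.", "How are you?"],
    )
    print(translation)  # MurfApiTranslationResponse
    # client.voice_changer.convert(...) is covered by the new voice_changer/client.py further down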
murf-1.2.2/src/murf/text/client.py (new file)

@@ -0,0 +1,262 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from ..core.request_options import RequestOptions
+from ..types.murf_api_translation_response import MurfApiTranslationResponse
+from ..core.pydantic_utilities import parse_obj_as
+from ..errors.bad_request_error import BadRequestError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.forbidden_error import ForbiddenError
+from ..errors.internal_server_error import InternalServerError
+from ..errors.service_unavailable_error import ServiceUnavailableError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class TextClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def translate(
+        self,
+        *,
+        target_language: str,
+        texts: typing.Sequence[str],
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> MurfApiTranslationResponse:
+        """
+        Parameters
+        ----------
+        target_language : str
+            The language code for the target translation
+
+        texts : typing.Sequence[str]
+            List of texts to translate
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        MurfApiTranslationResponse
+            Ok
+
+        Examples
+        --------
+        from murf import Murf
+
+        client = Murf(
+            api_key="YOUR_API_KEY",
+        )
+        client.text.translate(
+            target_language="es-ES",
+            texts=["Hello, world.", "How are you?"],
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/text/translate",
+            method="POST",
+            json={
+                "targetLanguage": target_language,
+                "texts": texts,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    MurfApiTranslationResponse,
+                    parse_obj_as(
+                        type_=MurfApiTranslationResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 503:
+                raise ServiceUnavailableError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncTextClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def translate(
+        self,
+        *,
+        target_language: str,
+        texts: typing.Sequence[str],
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> MurfApiTranslationResponse:
+        """
+        Parameters
+        ----------
+        target_language : str
+            The language code for the target translation
+
+        texts : typing.Sequence[str]
+            List of texts to translate
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        MurfApiTranslationResponse
+            Ok
+
+        Examples
+        --------
+        import asyncio
+
+        from murf import AsyncMurf
+
+        client = AsyncMurf(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.text.translate(
+                target_language="es-ES",
+                texts=["Hello, world.", "How are you?"],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/text/translate",
+            method="POST",
+            json={
+                "targetLanguage": target_language,
+                "texts": texts,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    MurfApiTranslationResponse,
+                    parse_obj_as(
+                        type_=MurfApiTranslationResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 503:
+                raise ServiceUnavailableError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
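As the generated module above shows, non-2xx responses from v1/text/translate are mapped to typed exceptions (BadRequestError, PaymentRequiredError, ForbiddenError, InternalServerError, ServiceUnavailableError), with ApiError as the JSON-decode fallback. A hedged sketch of how a caller might handle them, importing from the module paths visible in this diff and assuming (as the ApiError constructor here suggests) that the raised errors expose status_code and body attributes:

    from murf import Murf
    from murf.core.api_error import ApiError
    from murf.errors.bad_request_error import BadRequestError
    from murf.errors.payment_required_error import PaymentRequiredError

    client = Murf(api_key="YOUR_API_KEY")  # placeholder key
    try:
        result = client.text.translate(target_language="es-ES", texts=["Hello, world."])
    except BadRequestError as e:
        print("invalid request body:", e.body)       # HTTP 400
    except PaymentRequiredError as e:
        print("quota or billing issue:", e.body)     # HTTP 402
    except ApiError as e:
        print("other API failure:", e.status_code)   # any other non-2xx or unparseable response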
{murf-1.2.1 → murf-1.2.2}/src/murf/text_to_speech/client.py

@@ -215,7 +215,6 @@ class TextToSpeechClient:
         *,
         text: str,
         voice_id: str,
-        token: typing.Optional[str] = None,
         channel_type: typing.Optional[str] = OMIT,
         format: typing.Optional[str] = OMIT,
         multi_native_locale: typing.Optional[str] = OMIT,
@@ -237,8 +236,6 @@ class TextToSpeechClient:
         voice_id : str
             Use the GET /v1/speech/voices api to find supported voiceIds.
 
-        token : typing.Optional[str]
-
         channel_type : typing.Optional[str]
             Valid values: STEREO, MONO
 
@@ -271,6 +268,15 @@ class TextToSpeechClient:
         ------
         typing.Iterator[bytes]
             Ok
+
+        Examples
+        --------
+        from murf import Murf
+
+        client = Murf(
+            api_key="YOUR_API_KEY",
+        )
+        client.text_to_speech.stream()
         """
         with self._client_wrapper.httpx_client.stream(
             "v1/speech/stream",
@@ -289,7 +295,6 @@ class TextToSpeechClient:
             },
             headers={
                 "content-type": "application/json",
-                "token": str(token) if token is not None else None,
             },
             request_options=request_options,
             omit=OMIT,
@@ -647,7 +652,6 @@ class AsyncTextToSpeechClient:
         *,
         text: str,
         voice_id: str,
-        token: typing.Optional[str] = None,
         channel_type: typing.Optional[str] = OMIT,
         format: typing.Optional[str] = OMIT,
         multi_native_locale: typing.Optional[str] = OMIT,
@@ -669,8 +673,6 @@ class AsyncTextToSpeechClient:
         voice_id : str
             Use the GET /v1/speech/voices api to find supported voiceIds.
 
-        token : typing.Optional[str]
-
         channel_type : typing.Optional[str]
             Valid values: STEREO, MONO
 
@@ -703,6 +705,23 @@ class AsyncTextToSpeechClient:
         ------
         typing.AsyncIterator[bytes]
             Ok
+
+        Examples
+        --------
+        import asyncio
+
+        from murf import AsyncMurf
+
+        client = AsyncMurf(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.text_to_speech.stream()
+
+
+        asyncio.run(main())
+        """
         """
         async with self._client_wrapper.httpx_client.stream(
             "v1/speech/stream",
@@ -721,7 +740,6 @@ class AsyncTextToSpeechClient:
             },
             headers={
                 "content-type": "application/json",
-                "token": str(token) if token is not None else None,
             },
             request_options=request_options,
             omit=OMIT,
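The only breaking change in this file is the removal of the optional token parameter (and the corresponding "token" request header) from both stream() methods; per the client_wrapper hunk earlier in this diff, authentication continues to flow through the api-key header set on the client. A hedged before/after sketch for callers, with placeholder text, voice id, and output path:

    from murf import Murf

    client = Murf(api_key="YOUR_API_KEY")

    # murf 1.2.1 (old): an explicit token could be passed per call
    # client.text_to_speech.stream(text="Hello", voice_id="en-US-natalie", token="...")

    # murf 1.2.2 (new): drop the token argument; the client's api-key is used instead
    with open("output_audio", "wb") as f:
        for chunk in client.text_to_speech.stream(text="Hello", voice_id="en-US-natalie"):
            f.write(chunk)  # stream() yields bytes; the encoding depends on the format parameter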
{murf-1.2.1 → murf-1.2.2}/src/murf/types/__init__.py

@@ -17,7 +17,6 @@ from .group_api_project_response import GroupApiProjectResponse
 from .locale_response import LocaleResponse
 from .locale_response_supports_item import LocaleResponseSupportsItem
 from .metadata import Metadata
-from .murf_api_translation_request import MurfApiTranslationRequest
 from .murf_api_translation_response import MurfApiTranslationResponse
 from .pronunciation_detail import PronunciationDetail
 from .pronunciation_detail_type import PronunciationDetailType
@@ -45,7 +44,6 @@ __all__ = [
     "LocaleResponse",
     "LocaleResponseSupportsItem",
     "Metadata",
-    "MurfApiTranslationRequest",
     "MurfApiTranslationResponse",
     "PronunciationDetail",
     "PronunciationDetailType",
murf-1.2.2/src/murf/voice_changer/client.py (new file)

@@ -0,0 +1,431 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from .. import core
+from ..core.request_options import RequestOptions
+from ..types.speech_to_speech_response import SpeechToSpeechResponse
+from ..core.pydantic_utilities import parse_obj_as
+from ..errors.bad_request_error import BadRequestError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.forbidden_error import ForbiddenError
+from ..errors.internal_server_error import InternalServerError
+from ..errors.service_unavailable_error import ServiceUnavailableError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class VoiceChangerClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def convert(
+        self,
+        *,
+        audio_duration: typing.Optional[float] = OMIT,
+        channel_type: typing.Optional[str] = OMIT,
+        encode_output_as_base_64: typing.Optional[bool] = OMIT,
+        file: typing.Optional[core.File] = OMIT,
+        file_url: typing.Optional[str] = OMIT,
+        format: typing.Optional[str] = OMIT,
+        multi_native_locale: typing.Optional[str] = OMIT,
+        pitch: typing.Optional[int] = OMIT,
+        pronunciation_dictionary: typing.Optional[str] = OMIT,
+        rate: typing.Optional[int] = OMIT,
+        retain_accent: typing.Optional[bool] = OMIT,
+        retain_prosody: typing.Optional[bool] = OMIT,
+        return_transcription: typing.Optional[bool] = OMIT,
+        sample_rate: typing.Optional[float] = OMIT,
+        style: typing.Optional[str] = OMIT,
+        transcription: typing.Optional[str] = OMIT,
+        variation: typing.Optional[int] = OMIT,
+        voice_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SpeechToSpeechResponse:
+        """
+        Returns a url to the generated audio file along with other associated properties.
+
+        Parameters
+        ----------
+        audio_duration : typing.Optional[float]
+            This parameter allows specifying the duration (in seconds) for the generated audio. If the value is 0, this parameter will be ignored. Only available for Gen2 model.
+
+        channel_type : typing.Optional[str]
+            Valid values: STEREO, MONO
+
+        encode_output_as_base_64 : typing.Optional[bool]
+            Set to true to receive audio in response as a Base64 encoded string along with a url.
+
+        file : typing.Optional[core.File]
+            See core.File for more documentation
+
+        file_url : typing.Optional[str]
+
+        format : typing.Optional[str]
+            Format of the generated audio file. Valid values: MP3, WAV, FLAC, ALAW, ULAW
+
+        multi_native_locale : typing.Optional[str]
+            Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
+            Valid values: "en-US", "en-UK", "es-ES", etc.
+
+            Use the GET /v1/speech/voices endpoint to retrieve the list of available voices and languages.
+
+        pitch : typing.Optional[int]
+            Pitch of the voiceover
+
+        pronunciation_dictionary : typing.Optional[str]
+            A JSON string that defines custom pronunciations for specific words or phrases. Each key is a word or phrase, and its value is an object with `type` and `pronunciation`.
+
+            Example 1: '{"live": {"type": "IPA", "pronunciation": "laɪv"}}'
+
+            Example 2: '{"2022": {"type": "SAY_AS", "pronunciation": "twenty twenty two"}}'
+
+        rate : typing.Optional[int]
+            Speed of the voiceover
+
+        retain_accent : typing.Optional[bool]
+            Set to true to retain the original accent of the speaker during voice generation.
+
+        retain_prosody : typing.Optional[bool]
+            Indicates whether to retain the original prosody (intonation, rhythm, and stress) of the input voice in the generated output.
+
+        return_transcription : typing.Optional[bool]
+            Set to true to include a textual transcription of the generated audio in the response.
+
+        sample_rate : typing.Optional[float]
+            Valid values are 8000, 24000, 44100, 48000
+
+        style : typing.Optional[str]
+            The voice style to be used for voiceover generation.
+
+        transcription : typing.Optional[str]
+            This parameter allows specifying a transcription of the audio clip, which will then be used as input for the voice changer
+
+        variation : typing.Optional[int]
+            Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
+
+        voice_id : typing.Optional[str]
+            Use the GET /v1/speech/voices api to find supported voiceIds.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SpeechToSpeechResponse
+            Ok
+
+        Examples
+        --------
+        from murf import Murf
+
+        client = Murf(
+            api_key="YOUR_API_KEY",
+        )
+        client.voice_changer.convert()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/voice-changer/convert",
+            method="POST",
+            data={
+                "audio_duration": audio_duration,
+                "channel_type": channel_type,
+                "encode_output_as_base64": encode_output_as_base_64,
+                "file_url": file_url,
+                "format": format,
+                "multi_native_locale": multi_native_locale,
+                "pitch": pitch,
+                "pronunciation_dictionary": pronunciation_dictionary,
+                "rate": rate,
+                "retain_accent": retain_accent,
+                "retain_prosody": retain_prosody,
+                "return_transcription": return_transcription,
+                "sample_rate": sample_rate,
+                "style": style,
+                "transcription": transcription,
+                "variation": variation,
+                "voice_id": voice_id,
+            },
+            files={
+                "file": file,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    SpeechToSpeechResponse,
+                    parse_obj_as(
+                        type_=SpeechToSpeechResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 503:
+                raise ServiceUnavailableError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncVoiceChangerClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def convert(
+        self,
+        *,
+        audio_duration: typing.Optional[float] = OMIT,
+        channel_type: typing.Optional[str] = OMIT,
+        encode_output_as_base_64: typing.Optional[bool] = OMIT,
+        file: typing.Optional[core.File] = OMIT,
+        file_url: typing.Optional[str] = OMIT,
+        format: typing.Optional[str] = OMIT,
+        multi_native_locale: typing.Optional[str] = OMIT,
+        pitch: typing.Optional[int] = OMIT,
+        pronunciation_dictionary: typing.Optional[str] = OMIT,
+        rate: typing.Optional[int] = OMIT,
+        retain_accent: typing.Optional[bool] = OMIT,
+        retain_prosody: typing.Optional[bool] = OMIT,
+        return_transcription: typing.Optional[bool] = OMIT,
+        sample_rate: typing.Optional[float] = OMIT,
+        style: typing.Optional[str] = OMIT,
+        transcription: typing.Optional[str] = OMIT,
+        variation: typing.Optional[int] = OMIT,
+        voice_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SpeechToSpeechResponse:
+        """
+        Returns a url to the generated audio file along with other associated properties.
+
+        Parameters
+        ----------
+        audio_duration : typing.Optional[float]
+            This parameter allows specifying the duration (in seconds) for the generated audio. If the value is 0, this parameter will be ignored. Only available for Gen2 model.
+
+        channel_type : typing.Optional[str]
+            Valid values: STEREO, MONO
+
+        encode_output_as_base_64 : typing.Optional[bool]
+            Set to true to receive audio in response as a Base64 encoded string along with a url.
+
+        file : typing.Optional[core.File]
+            See core.File for more documentation
+
+        file_url : typing.Optional[str]
+
+        format : typing.Optional[str]
+            Format of the generated audio file. Valid values: MP3, WAV, FLAC, ALAW, ULAW
+
+        multi_native_locale : typing.Optional[str]
+            Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
+            Valid values: "en-US", "en-UK", "es-ES", etc.
+
+            Use the GET /v1/speech/voices endpoint to retrieve the list of available voices and languages.
+
+        pitch : typing.Optional[int]
+            Pitch of the voiceover
+
+        pronunciation_dictionary : typing.Optional[str]
+            A JSON string that defines custom pronunciations for specific words or phrases. Each key is a word or phrase, and its value is an object with `type` and `pronunciation`.
+
+            Example 1: '{"live": {"type": "IPA", "pronunciation": "laɪv"}}'
+
+            Example 2: '{"2022": {"type": "SAY_AS", "pronunciation": "twenty twenty two"}}'
+
+        rate : typing.Optional[int]
+            Speed of the voiceover
+
+        retain_accent : typing.Optional[bool]
+            Set to true to retain the original accent of the speaker during voice generation.
+
+        retain_prosody : typing.Optional[bool]
+            Indicates whether to retain the original prosody (intonation, rhythm, and stress) of the input voice in the generated output.
+
+        return_transcription : typing.Optional[bool]
+            Set to true to include a textual transcription of the generated audio in the response.
+
+        sample_rate : typing.Optional[float]
+            Valid values are 8000, 24000, 44100, 48000
+
+        style : typing.Optional[str]
+            The voice style to be used for voiceover generation.
+
+        transcription : typing.Optional[str]
+            This parameter allows specifying a transcription of the audio clip, which will then be used as input for the voice changer
+
+        variation : typing.Optional[int]
+            Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
+
+        voice_id : typing.Optional[str]
+            Use the GET /v1/speech/voices api to find supported voiceIds.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SpeechToSpeechResponse
+            Ok
+
+        Examples
+        --------
+        import asyncio
+
+        from murf import AsyncMurf
+
+        client = AsyncMurf(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.voice_changer.convert()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/voice-changer/convert",
+            method="POST",
+            data={
+                "audio_duration": audio_duration,
+                "channel_type": channel_type,
+                "encode_output_as_base64": encode_output_as_base_64,
+                "file_url": file_url,
+                "format": format,
+                "multi_native_locale": multi_native_locale,
+                "pitch": pitch,
+                "pronunciation_dictionary": pronunciation_dictionary,
+                "rate": rate,
+                "retain_accent": retain_accent,
+                "retain_prosody": retain_prosody,
+                "return_transcription": return_transcription,
+                "sample_rate": sample_rate,
+                "style": style,
+                "transcription": transcription,
+                "variation": variation,
+                "voice_id": voice_id,
+            },
+            files={
+                "file": file,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    SpeechToSpeechResponse,
+                    parse_obj_as(
+                        type_=SpeechToSpeechResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 503:
+                raise ServiceUnavailableError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
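The generated docstring example calls convert() with no arguments; in practice the endpoint consumes multipart form data, with the source audio supplied either as an uploaded file or a file_url, plus an optional target voice_id. A hedged sketch of one plausible call, assuming core.File accepts a binary file object (as Fern upload helpers typically do); the file name and voice id are placeholders:

    from murf import Murf

    client = Murf(api_key="YOUR_API_KEY")
    with open("sample.wav", "rb") as audio:
        response = client.voice_changer.convert(
            file=audio,                  # sent in the multipart `files` section
            voice_id="en-US-natalie",    # placeholder; list voices via GET /v1/speech/voices
            retain_prosody=True,
            return_transcription=True,
        )
    # SpeechToSpeechResponse: per the docstring, it includes a url to the generated audio
    print(response)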
murf-1.2.1/src/murf/types/murf_api_translation_request.py (deleted)

@@ -1,29 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing_extensions
-from ..core.serialization import FieldMetadata
-import pydantic
-import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class MurfApiTranslationRequest(UniversalBaseModel):
-    target_language: typing_extensions.Annotated[str, FieldMetadata(alias="targetLanguage")] = pydantic.Field()
-    """
-    The language code for the target translation
-    """
-
-    texts: typing.List[str] = pydantic.Field()
-    """
-    List of texts to translate
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore  # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
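Deleting this model (and its export from murf and murf.types, as the __init__ hunks above show) is a breaking change for any code that imported MurfApiTranslationRequest directly; the translate operation itself remains available, now exposed as client.text.translate with plain keyword arguments. A hedged migration sketch, with placeholder values:

    # murf 1.2.1 (old): the request model could be imported and constructed
    # from murf import MurfApiTranslationRequest

    # murf 1.2.2 (new): pass the fields directly to the new text client
    from murf import Murf

    client = Murf(api_key="YOUR_API_KEY")
    result = client.text.translate(
        target_language="es-ES",
        texts=["Hello, world.", "How are you?"],
    )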