murf-1.2.0-py3-none-any.whl → murf-1.2.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of murf has been flagged as possibly problematic.
- murf/__init__.py +15 -3
- murf/base_client.py +8 -0
- murf/core/client_wrapper.py +1 -1
- murf/text/__init__.py +2 -0
- murf/text/client.py +262 -0
- murf/text_to_speech/client.py +314 -2
- murf/types/__init__.py +12 -2
- murf/types/character_count.py +20 -0
- murf/types/generate_speech_response.py +11 -15
- murf/types/metadata.py +22 -0
- murf/types/murf_api_translation_response.py +22 -0
- murf/types/speech_to_speech_response.py +47 -0
- murf/types/translation.py +20 -0
- murf/types/{word_duration.py → word_duration_response.py} +19 -5
- murf/voice_changer/__init__.py +2 -0
- murf/voice_changer/client.py +431 -0
- {murf-1.2.0.dist-info → murf-1.2.2.dist-info}/METADATA +4 -4
- {murf-1.2.0.dist-info → murf-1.2.2.dist-info}/RECORD +20 -11
- {murf-1.2.0.dist-info → murf-1.2.2.dist-info}/LICENSE +0 -0
- {murf-1.2.0.dist-info → murf-1.2.2.dist-info}/WHEEL +0 -0
murf/voice_changer/client.py (new file)

@@ -0,0 +1,431 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from .. import core
+from ..core.request_options import RequestOptions
+from ..types.speech_to_speech_response import SpeechToSpeechResponse
+from ..core.pydantic_utilities import parse_obj_as
+from ..errors.bad_request_error import BadRequestError
+from ..errors.payment_required_error import PaymentRequiredError
+from ..errors.forbidden_error import ForbiddenError
+from ..errors.internal_server_error import InternalServerError
+from ..errors.service_unavailable_error import ServiceUnavailableError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class VoiceChangerClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def convert(
+        self,
+        *,
+        audio_duration: typing.Optional[float] = OMIT,
+        channel_type: typing.Optional[str] = OMIT,
+        encode_output_as_base_64: typing.Optional[bool] = OMIT,
+        file: typing.Optional[core.File] = OMIT,
+        file_url: typing.Optional[str] = OMIT,
+        format: typing.Optional[str] = OMIT,
+        multi_native_locale: typing.Optional[str] = OMIT,
+        pitch: typing.Optional[int] = OMIT,
+        pronunciation_dictionary: typing.Optional[str] = OMIT,
+        rate: typing.Optional[int] = OMIT,
+        retain_accent: typing.Optional[bool] = OMIT,
+        retain_prosody: typing.Optional[bool] = OMIT,
+        return_transcription: typing.Optional[bool] = OMIT,
+        sample_rate: typing.Optional[float] = OMIT,
+        style: typing.Optional[str] = OMIT,
+        transcription: typing.Optional[str] = OMIT,
+        variation: typing.Optional[int] = OMIT,
+        voice_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SpeechToSpeechResponse:
+        """
+        Returns a url to the generated audio file along with other associated properties.
+
+        Parameters
+        ----------
+        audio_duration : typing.Optional[float]
+            This parameter allows specifying the duration (in seconds) for the generated audio. If the value is 0, this parameter will be ignored. Only available for Gen2 model.
+
+        channel_type : typing.Optional[str]
+            Valid values: STEREO, MONO
+
+        encode_output_as_base_64 : typing.Optional[bool]
+            Set to true to receive audio in response as a Base64 encoded string along with a url.
+
+        file : typing.Optional[core.File]
+            See core.File for more documentation
+
+        file_url : typing.Optional[str]
+
+        format : typing.Optional[str]
+            Format of the generated audio file. Valid values: MP3, WAV, FLAC, ALAW, ULAW
+
+        multi_native_locale : typing.Optional[str]
+            Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
+            Valid values: "en-US", "en-UK", "es-ES", etc.
+
+            Use the GET /v1/speech/voices endpoint to retrieve the list of available voices and languages.
+
+        pitch : typing.Optional[int]
+            Pitch of the voiceover
+
+        pronunciation_dictionary : typing.Optional[str]
+            A JSON string that defines custom pronunciations for specific words or phrases. Each key is a word or phrase, and its value is an object with `type` and `pronunciation`.
+
+            Example 1: '{"live": {"type": "IPA", "pronunciation": "laɪv"}}'
+
+            Example 2: '{"2022": {"type": "SAY_AS", "pronunciation": "twenty twenty two"}}'
+
+        rate : typing.Optional[int]
+            Speed of the voiceover
+
+        retain_accent : typing.Optional[bool]
+            Set to true to retain the original accent of the speaker during voice generation.
+
+        retain_prosody : typing.Optional[bool]
+            Indicates whether to retain the original prosody (intonation, rhythm, and stress) of the input voice in the generated output.
+
+        return_transcription : typing.Optional[bool]
+            Set to true to include a textual transcription of the generated audio in the response.
+
+        sample_rate : typing.Optional[float]
+            Valid values are 8000, 24000, 44100, 48000
+
+        style : typing.Optional[str]
+            The voice style to be used for voiceover generation.
+
+        transcription : typing.Optional[str]
+            This parameter allows specifying a transcription of the audio clip, which will then be used as input for the voice changer
+
+        variation : typing.Optional[int]
+            Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
+
+        voice_id : typing.Optional[str]
+            Use the GET /v1/speech/voices api to find supported voiceIds.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SpeechToSpeechResponse
+            Ok
+
+        Examples
+        --------
+        from murf import Murf
+
+        client = Murf(
+            api_key="YOUR_API_KEY",
+        )
+        client.voice_changer.convert()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/voice-changer/convert",
+            method="POST",
+            data={
+                "audio_duration": audio_duration,
+                "channel_type": channel_type,
+                "encode_output_as_base64": encode_output_as_base_64,
+                "file_url": file_url,
+                "format": format,
+                "multi_native_locale": multi_native_locale,
+                "pitch": pitch,
+                "pronunciation_dictionary": pronunciation_dictionary,
+                "rate": rate,
+                "retain_accent": retain_accent,
+                "retain_prosody": retain_prosody,
+                "return_transcription": return_transcription,
+                "sample_rate": sample_rate,
+                "style": style,
+                "transcription": transcription,
+                "variation": variation,
+                "voice_id": voice_id,
+            },
+            files={
+                "file": file,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    SpeechToSpeechResponse,
+                    parse_obj_as(
+                        type_=SpeechToSpeechResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 503:
+                raise ServiceUnavailableError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncVoiceChangerClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def convert(
+        self,
+        *,
+        audio_duration: typing.Optional[float] = OMIT,
+        channel_type: typing.Optional[str] = OMIT,
+        encode_output_as_base_64: typing.Optional[bool] = OMIT,
+        file: typing.Optional[core.File] = OMIT,
+        file_url: typing.Optional[str] = OMIT,
+        format: typing.Optional[str] = OMIT,
+        multi_native_locale: typing.Optional[str] = OMIT,
+        pitch: typing.Optional[int] = OMIT,
+        pronunciation_dictionary: typing.Optional[str] = OMIT,
+        rate: typing.Optional[int] = OMIT,
+        retain_accent: typing.Optional[bool] = OMIT,
+        retain_prosody: typing.Optional[bool] = OMIT,
+        return_transcription: typing.Optional[bool] = OMIT,
+        sample_rate: typing.Optional[float] = OMIT,
+        style: typing.Optional[str] = OMIT,
+        transcription: typing.Optional[str] = OMIT,
+        variation: typing.Optional[int] = OMIT,
+        voice_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SpeechToSpeechResponse:
+        """
+        Returns a url to the generated audio file along with other associated properties.
+
+        Parameters
+        ----------
+        audio_duration : typing.Optional[float]
+            This parameter allows specifying the duration (in seconds) for the generated audio. If the value is 0, this parameter will be ignored. Only available for Gen2 model.
+
+        channel_type : typing.Optional[str]
+            Valid values: STEREO, MONO
+
+        encode_output_as_base_64 : typing.Optional[bool]
+            Set to true to receive audio in response as a Base64 encoded string along with a url.
+
+        file : typing.Optional[core.File]
+            See core.File for more documentation
+
+        file_url : typing.Optional[str]
+
+        format : typing.Optional[str]
+            Format of the generated audio file. Valid values: MP3, WAV, FLAC, ALAW, ULAW
+
+        multi_native_locale : typing.Optional[str]
+            Specifies the language for the generated audio, enabling a voice to speak in multiple languages natively. Only available in the Gen2 model.
+            Valid values: "en-US", "en-UK", "es-ES", etc.
+
+            Use the GET /v1/speech/voices endpoint to retrieve the list of available voices and languages.
+
+        pitch : typing.Optional[int]
+            Pitch of the voiceover
+
+        pronunciation_dictionary : typing.Optional[str]
+            A JSON string that defines custom pronunciations for specific words or phrases. Each key is a word or phrase, and its value is an object with `type` and `pronunciation`.
+
+            Example 1: '{"live": {"type": "IPA", "pronunciation": "laɪv"}}'
+
+            Example 2: '{"2022": {"type": "SAY_AS", "pronunciation": "twenty twenty two"}}'
+
+        rate : typing.Optional[int]
+            Speed of the voiceover
+
+        retain_accent : typing.Optional[bool]
+            Set to true to retain the original accent of the speaker during voice generation.
+
+        retain_prosody : typing.Optional[bool]
+            Indicates whether to retain the original prosody (intonation, rhythm, and stress) of the input voice in the generated output.
+
+        return_transcription : typing.Optional[bool]
+            Set to true to include a textual transcription of the generated audio in the response.
+
+        sample_rate : typing.Optional[float]
+            Valid values are 8000, 24000, 44100, 48000
+
+        style : typing.Optional[str]
+            The voice style to be used for voiceover generation.
+
+        transcription : typing.Optional[str]
+            This parameter allows specifying a transcription of the audio clip, which will then be used as input for the voice changer
+
+        variation : typing.Optional[int]
+            Higher values will add more variation in terms of Pause, Pitch, and Speed to the voice. Only available for Gen2 model.
+
+        voice_id : typing.Optional[str]
+            Use the GET /v1/speech/voices api to find supported voiceIds.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SpeechToSpeechResponse
+            Ok
+
+        Examples
+        --------
+        import asyncio
+
+        from murf import AsyncMurf
+
+        client = AsyncMurf(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.voice_changer.convert()
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/voice-changer/convert",
+            method="POST",
+            data={
+                "audio_duration": audio_duration,
+                "channel_type": channel_type,
+                "encode_output_as_base64": encode_output_as_base_64,
+                "file_url": file_url,
+                "format": format,
+                "multi_native_locale": multi_native_locale,
+                "pitch": pitch,
+                "pronunciation_dictionary": pronunciation_dictionary,
+                "rate": rate,
+                "retain_accent": retain_accent,
+                "retain_prosody": retain_prosody,
+                "return_transcription": return_transcription,
+                "sample_rate": sample_rate,
+                "style": style,
+                "transcription": transcription,
+                "variation": variation,
+                "voice_id": voice_id,
+            },
+            files={
+                "file": file,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    SpeechToSpeechResponse,
+                    parse_obj_as(
+                        type_=SpeechToSpeechResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 402:
+                raise PaymentRequiredError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 503:
+                raise ServiceUnavailableError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
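The generated docstrings above only show a bare `client.voice_changer.convert()` call. For orientation, here is a minimal usage sketch of the new endpoint. The environment-variable key handling, input file name, and the voice_id value are illustrative assumptions rather than values taken from this release, and treating an open binary file handle as a valid `core.File` follows the usual Fern SDK convention; it is not something this diff itself confirms.

# Minimal sketch of the voice_changer.convert endpoint added in 1.2.2.
# Assumes MURF_API_KEY is set and that input_clip.wav exists locally.
import os

from murf import Murf

client = Murf(
    api_key=os.environ["MURF_API_KEY"],  # or pass the key directly, as in the docstring example
)

# The parameter list exposes both `file` and `file_url` for supplying the input audio;
# here a local file handle is uploaded.
with open("input_clip.wav", "rb") as audio:
    response = client.voice_changer.convert(
        file=audio,
        voice_id="en-US-natalie",    # hypothetical ID; list real ones via GET /v1/speech/voices
        format="WAV",                # MP3, WAV, FLAC, ALAW, ULAW
        retain_prosody=True,
        return_transcription=True,
    )

# SpeechToSpeechResponse carries a URL to the generated audio plus other properties;
# its exact fields are defined in murf/types/speech_to_speech_response.py.
print(response)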
{murf-1.2.0.dist-info → murf-1.2.2.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: murf
-Version: 1.2.0
+Version: 1.2.2
 Summary:
 Requires-Python: >=3.8,<4.0
 Classifier: Intended Audience :: Developers
@@ -48,7 +48,7 @@ Description-Content-Type: text/markdown
 
 ## Overview
 
-The Murf Python SDK offers seamless integration with the [Murf AI](https://murf.ai/) [text-to-speech software](https://murf.ai/text-to-speech), enabling developers and creators to convert text into lifelike speech effortlessly. With over 130 natural-sounding voices across
+The Murf Python SDK offers seamless integration with the [Murf AI](https://murf.ai/) [text-to-speech software](https://murf.ai/text-to-speech), enabling developers and creators to convert text into lifelike speech effortlessly. With over 130 natural-sounding voices across 21 languages and 20+ speaking styles, Murf provides unparalleled speech customization for a wide range of applications. The SDK is designed for both synchronous and asynchronous workflows, featuring robust error handling, advanced configuration options, and support for real-time applications.
 
 ---
 
@@ -89,9 +89,9 @@ For more detailed information, refer to the [official documentation](https://mur
 ## Features
 
 - **Text-to-Speech Conversion:** Transform text into natural-sounding speech.
-- **Multilingual Support:** Access voices in over
+- **Multilingual Support:** Access voices in over 21 languages, including English, French, German, Spanish, Italian, Hindi, Portuguese, Dutch, Korean, Chinese (Mandarin), Bengali, Tamil, Polish, Japanese, Turkish, Indonesian, Croatian, Greek, Romanian, Slovak, and Bulgarian.
 
-
 
 - **Multiple Voice Styles:** Choose from 20+ speaking styles to suit your application's needs.
 - **Advanced Voice Customization:** Adjust parameters like pitch, speed, pauses, and pronunciation for optimal output. Fine-grained controls let you tailor the voice output to match your specific requirements.
{murf-1.2.0.dist-info → murf-1.2.2.dist-info}/RECORD

@@ -1,11 +1,11 @@
-murf/__init__.py,sha256=
+murf/__init__.py,sha256=TkqLNGuu7H0kvP3wYhLf_vfOo7deH6P422JMZ66A9HA,2205
 murf/auth/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 murf/auth/client.py,sha256=mXUOPNju_oGpemo5yb-psqdru7b409R-smTw9heBh54,6118
-murf/base_client.py,sha256=
+murf/base_client.py,sha256=lkptrkFgokPx7QXBQt767h71N3uwtFIVYsXqOPgIjFc,6343
 murf/client.py,sha256=sE8Ob-pPnAIpHwxyvCpYOgheNsjH5DwD_ei__8il5Yo,4109
 murf/core/__init__.py,sha256=SQ85PF84B9MuKnBwHNHWemSGuy-g_515gFYNFhvEE0I,1438
 murf/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-murf/core/client_wrapper.py,sha256=
+murf/core/client_wrapper.py,sha256=k_TouDAcLZCyKftrsc4JPYYH6RZpjpFZaHKyZILwqzM,1995
 murf/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 murf/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
 murf/core/http_client.py,sha256=siUQ6UV0ARZALlxubqWSSAAPC9B4VW8y6MGlHStfaeo,19552
@@ -38,11 +38,13 @@ murf/errors/payment_required_error.py,sha256=7xjVK_OFqWNU7L0BloP5ns1kavWIbC2XMfb
 murf/errors/service_unavailable_error.py,sha256=aiWJkLwL3ZF8DUhnHA7DncgPR-gAA9Omj86XByHSVlg,272
 murf/errors/unauthorized_error.py,sha256=1ewNCqSG1P-uogB5yCNwreq4Bf3VRor0woSOXS4NjPU,266
 murf/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+murf/text/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
+murf/text/client.py,sha256=Aownk0rY8RvIREiksxl1jPYLhNXyMjyZ1vJaQcaXD9I,9124
 murf/text_to_speech/__init__.py,sha256=Xv0-uZoCGDrfYszMbB2sh5nsApPBrcmek6SDgljYfxA,167
-murf/text_to_speech/client.py,sha256=
+murf/text_to_speech/client.py,sha256=vKFONdvh6vduo0LzVuH7A7o85MhjVpEInwMxazbnImE,35025
 murf/text_to_speech/types/__init__.py,sha256=Hq1g0iZQ8sHQVU32Pt6lEetGXKEUUntytBmIq4bVK0A,199
 murf/text_to_speech/types/generate_speech_request_model_version.py,sha256=y0NvjvNgtPrNcywVnq11MjANl-hJ7m-S5M_Ik5NP7c8,173
-murf/types/__init__.py,sha256=
+murf/types/__init__.py,sha256=Nvq_pfebZdLeBJ4Rupwdov89hlkMNfJyq5E0q1NLCdY,2127
 murf/types/api_job_response.py,sha256=-0sasAgDmTzyn_7KVpYcRC5pCzm-QowtQNmrG6qpz6s,1388
 murf/types/api_job_response_dubbing_type.py,sha256=-tnqFfuSOXSZngLhHBLg0Dn_X4fP5XIGMsI1TewVxig,168
 murf/types/api_job_response_priority.py,sha256=27xuE25oMRVKzRXc-CLv_mu7ADr_PRsvNDXa4aFbt-k,171
@@ -51,21 +53,28 @@ murf/types/api_project_response_dubbing_type.py,sha256=ir-drn3sD8Qdbe6RWeJdPQyxx
 murf/types/api_voice.py,sha256=q56eq_iz_NnSCDWzxRvsJKFOU2Tje8vCrXHg1AmKf4U,1920
 murf/types/api_voice_gender.py,sha256=d0HtLLsXDckaiNE3mb7wojzRi6YwqzFiEnqdNecfo-E,169
 murf/types/auth_token_response.py,sha256=kc04BEfbgsTMuEnxGhs1hI3-4gtrOTOaKpR16Mg9Cc8,916
+murf/types/character_count.py,sha256=lDD1Bslx0bhewrJ7pYnTNx6ykmBnXoWxTuV-J6r19kQ,663
 murf/types/dub_api_detail_response.py,sha256=7Rgy1ygNuWGVPmnEUoQYh3B7owBrlpFKZ_PvvUZf-YA,772
 murf/types/dub_job_status_response.py,sha256=TNqCSOnO3kANySbyynNxQDYXpb_ewPp-meD1TFtV6NE,1123
 murf/types/form_data_content_disposition.py,sha256=kecApnkfJhGUcM9lP1Yc7khTrA5C-uMzaLeUH3TfQBI,1307
-murf/types/generate_speech_response.py,sha256=
+murf/types/generate_speech_response.py,sha256=4ShyktWhUAik8nD4E97Vh-7LELJgqKc4wbnOfNBmMdE,1633
 murf/types/group_api_project_response.py,sha256=cgKB9I02vDH_t1rYOSM5z6p25CzDOZrZjASQpt-HWyA,745
 murf/types/locale_response.py,sha256=2ix9RuJ_-Qw95XWTeTP-15a6kFTROReEQS0vn3Da2uU,773
 murf/types/locale_response_supports_item.py,sha256=yxGwpkzpeIfpG1KzygTHGjU1nK3U_plweHpxrbUWyT4,169
+murf/types/metadata.py,sha256=jROjd-DtKykNMS5L4iGVyKdoa-5F-2HeoqYI138QkLw,738
+murf/types/murf_api_translation_response.py,sha256=N-iRMtFGZSMvEOxTzgYcktgyo74EO9IDCI32QuwnLiw,737
 murf/types/pronunciation_detail.py,sha256=wm8asAQsYkL6O7QIhFl9tDEvo23r50-BUWamzdybHcg,937
 murf/types/pronunciation_detail_type.py,sha256=ihRcpMJJBEZGo59TFZefg_tfl_LeU0wlOXfMjkUNmh0,164
 murf/types/source_locale_response.py,sha256=hCIpMkpAKfCy2jqMCCLLqNnC_E0h-gDRyATnmDHNR2g,583
+murf/types/speech_to_speech_response.py,sha256=NMKRfNHzUFQ4bpCUQ_L7BfFpnwQ8AEMrJ7WYbh4GPzg,1447
 murf/types/style_details.py,sha256=Gh2ev6dv5K0uZU1ZRViWfITpJ5r7LmQvwV3PcxF1RKE,799
-murf/types/
+murf/types/translation.py,sha256=t1CFv_3PlsUgprZEb4EBLdy48StDgCfZ5qwc-rE8gVw,634
+murf/types/word_duration_response.py,sha256=bdW6yDcIWrMgAuQqp-MWseHpfLPexim-BWzHYOIArSs,1508
 murf/utils.py,sha256=VzRmn8e3fbDVYW7IlfGrcPwgu44PA2kBYRcZ7ZVNYUk,1259
 murf/version.py,sha256=pVGisqquGqFs8v4SJPE2o540FaaDKHWfOikWf4-9KKk,71
-murf
-murf
-murf-1.2.
-murf-1.2.
+murf/voice_changer/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
+murf/voice_changer/client.py,sha256=5665S18ZvySKE1EbYhWAAKXHM9DhNCF3bG5fVNqJY3I,17490
+murf-1.2.2.dist-info/LICENSE,sha256=SxRdfCVAmnkiSsVHJHhXmRX0yqidcRlBMjy-R2GZFdM,1066
+murf-1.2.2.dist-info/METADATA,sha256=saNXgU2X25QGch66OSitCuP7M--S9UmSW36_XnvYTBs,6848
+murf-1.2.2.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+murf-1.2.2.dist-info/RECORD,,
{murf-1.2.0.dist-info → murf-1.2.2.dist-info}/LICENSE: file without changes
{murf-1.2.0.dist-info → murf-1.2.2.dist-info}/WHEEL: file without changes