yta-audio-narration 0.0.7__py3-none-any.whl → 0.0.8__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in that registry.

yta_audio_narration/voices/tortoise.py
@@ -1,217 +1,220 @@
- """
- This voice engine is based in Coqui and I
- have another voice engine which is Coqui,
- but using other voice narrators.
-
- TODO: Consider mixing both voice engines
- and appending this voice narrator to the
- Coqui system and keep only one of them.
-
- -- Update 19/04/2025 --
- I've found that they created a fork in
- https://github.com/idiap/coqui-ai-TTS with
- a new version that is maintained, and the
- 'tts' was generating conflicts.
- """
- from yta_audio_narration_common.consts import DEFAULT_VOICE
- from yta_audio_narration_common.enums import NarrationLanguage, VoiceEmotion, VoiceSpeed, VoicePitch
- from yta_audio_narration_common.voice import NarrationVoice
- from yta_constants.enum import YTAEnum as Enum
- from yta_constants.file import FileType
- from yta_programming.output import Output
- from typing import Union
- from TTS.api import TTS
-
-
- """
- The options below are specified even if we
- don't use them later when processing the
- voice narration. This is to keep the same
- structure for any voice narration and to
- simplify the way we offer the options in
- an API that is able to make requests.
- """
-
- # 1. The voices we accept, as Enums
- class TortoiseVoiceName(Enum):
- """
- Available voices. The value is what is used
- for the audio creation.
- """
-
- DEFAULT = DEFAULT_VOICE
- SPANISH = 'es_002'
- MEXICAN = 'es_mx_002'
- # TODO: There a a lot of English US and more languages voices
-
- # 2. The languages we accept
- LANGUAGE_OPTIONS = [
- NarrationLanguage.SPANISH,
- NarrationLanguage.DEFAULT
- ]
-
- # 3. The emotions we accept
- EMOTION_OPTIONS = [
- VoiceEmotion.DEFAULT,
- VoiceEmotion.NORMAL,
- ]
-
- # 4. The speeds we accept
- SPEED_OPTIONS = [
- VoiceSpeed.DEFAULT,
- VoiceSpeed.NORMAL,
- ]
-
- # 5. The pitches we accept
- PITCH_OPTIONS = [
- VoicePitch.DEFAULT,
- VoicePitch.NORMAL,
- ]
-
- class TortoiseNarrationVoice(NarrationVoice):
- """
- Voice instance to be used when narrating with
- Tortoise engine.
- """
-
- @property
- def processed_name(
- self
- ) -> str:
- """
- Get the usable name value from the one that has
- been set when instantiating the instance.
- """
- # TODO: We are not using voice names here
- return None
-
- @property
- def processed_emotion(
- self
- ) -> str:
- """
- Get the usable emotion value from the one that
- has been set when instantiating the instance.
- """
- # This narration is not able to handle any
- # emotion (at least by now)
- return None
+ # TODO: Mix this narrator with the coqui
+ # because they are using the same engine
+
+ # """
+ # This voice engine is based in Coqui and I
+ # have another voice engine which is Coqui,
+ # but using other voice narrators.
+
+ # TODO: Consider mixing both voice engines
+ # and appending this voice narrator to the
+ # Coqui system and keep only one of them.
+
+ # -- Update 19/04/2025 --
+ # I've found that they created a fork in
+ # https://github.com/idiap/coqui-ai-TTS with
+ # a new version that is maintained, and the
+ # 'tts' was generating conflicts.
+ # """
+ # from yta_audio_narration_common.consts import DEFAULT_VOICE
+ # from yta_audio_narration_common.enums import NarrationLanguage, VoiceEmotion, VoiceSpeed, VoicePitch
+ # from yta_audio_narration_common.voice import NarrationVoice
+ # from yta_constants.enum import YTAEnum as Enum
+ # from yta_constants.file import FileType
+ # from yta_programming.output import Output
+ # from typing import Union
+ # from TTS.api import TTS
+
+
+ # """
+ # The options below are specified even if we
+ # don't use them later when processing the
+ # voice narration. This is to keep the same
+ # structure for any voice narration and to
+ # simplify the way we offer the options in
+ # an API that is able to make requests.
+ # """
+
+ # # 1. The voices we accept, as Enums
+ # class TortoiseVoiceName(Enum):
+ # """
+ # Available voices. The value is what is used
+ # for the audio creation.
+ # """
+
+ # DEFAULT = DEFAULT_VOICE
+ # SPANISH = 'es_002'
+ # MEXICAN = 'es_mx_002'
+ # # TODO: There a a lot of English US and more languages voices
+
+ # # 2. The languages we accept
+ # LANGUAGE_OPTIONS = [
+ # NarrationLanguage.SPANISH,
+ # NarrationLanguage.DEFAULT
+ # ]
+
+ # # 3. The emotions we accept
+ # EMOTION_OPTIONS = [
+ # VoiceEmotion.DEFAULT,
+ # VoiceEmotion.NORMAL,
+ # ]
+
+ # # 4. The speeds we accept
+ # SPEED_OPTIONS = [
+ # VoiceSpeed.DEFAULT,
+ # VoiceSpeed.NORMAL,
+ # ]
+
+ # # 5. The pitches we accept
+ # PITCH_OPTIONS = [
+ # VoicePitch.DEFAULT,
+ # VoicePitch.NORMAL,
+ # ]
+
+ # class TortoiseNarrationVoice(NarrationVoice):
+ # """
+ # Voice instance to be used when narrating with
+ # Tortoise engine.
+ # """
+
+ # @property
+ # def processed_name(
+ # self
+ # ) -> str:
+ # """
+ # Get the usable name value from the one that has
+ # been set when instantiating the instance.
+ # """
+ # # TODO: We are not using voice names here
+ # return None
+
+ # @property
+ # def processed_emotion(
+ # self
+ # ) -> str:
+ # """
+ # Get the usable emotion value from the one that
+ # has been set when instantiating the instance.
+ # """
+ # # This narration is not able to handle any
+ # # emotion (at least by now)
+ # return None
 
- @property
- def processed_speed(
- self
- ) -> int:
- """
- Get the usable speed value from the one that
- has been set when instantiating the instance.
- """
- # This is not used here
- return None
-
- @property
- def processed_pitch(
- self
- ) -> int:
- """
- Get the usable pitch value from the one that
- has been set when instantiating the instance.
- """
- # This is not used here
- return None
+ # @property
+ # def processed_speed(
+ # self
+ # ) -> int:
+ # """
+ # Get the usable speed value from the one that
+ # has been set when instantiating the instance.
+ # """
+ # # This is not used here
+ # return None
+
+ # @property
+ # def processed_pitch(
+ # self
+ # ) -> int:
+ # """
+ # Get the usable pitch value from the one that
+ # has been set when instantiating the instance.
+ # """
+ # # This is not used here
+ # return None
 
- @property
- def processed_language(
- self
- ) -> str:
- """
- Get the usable language value from the one that
- has been set when instantiating the instance.
- """
- language = (
- NarrationLanguage.SPANISH
- if self.language == NarrationLanguage.DEFAULT else
- self.language
- )
-
- return {
- NarrationLanguage.SPANISH: 'es'
- }[language]
-
- def validate_and_process(
- self,
- name: str,
- emotion: VoiceEmotion,
- speed: VoiceSpeed,
- pitch: VoicePitch,
- language: NarrationLanguage
- ):
- TortoiseVoiceName.to_enum(name)
- if VoiceEmotion.to_enum(emotion) not in EMOTION_OPTIONS:
- raise Exception(f'The provided {emotion} is not valid for this narration voice.')
- if VoiceSpeed.to_enum(speed) not in SPEED_OPTIONS:
- raise Exception(f'The provided {speed} is not valid for this narration voice.')
- if VoicePitch.to_enum(pitch) not in PITCH_OPTIONS:
- raise Exception(f'The provided {pitch} is not valid for this narration voice.')
- if NarrationLanguage.to_enum(language) not in LANGUAGE_OPTIONS:
- raise Exception(f'The provided {language} is not valid for this narration voice.')
+ # @property
+ # def processed_language(
+ # self
+ # ) -> str:
+ # """
+ # Get the usable language value from the one that
+ # has been set when instantiating the instance.
+ # """
+ # language = (
+ # NarrationLanguage.SPANISH
+ # if self.language == NarrationLanguage.DEFAULT else
+ # self.language
+ # )
+
+ # return {
+ # NarrationLanguage.SPANISH: 'es'
+ # }[language]
+
+ # def validate_and_process(
+ # self,
+ # name: str,
+ # emotion: VoiceEmotion,
+ # speed: VoiceSpeed,
+ # pitch: VoicePitch,
+ # language: NarrationLanguage
+ # ):
+ # TortoiseVoiceName.to_enum(name)
+ # if VoiceEmotion.to_enum(emotion) not in EMOTION_OPTIONS:
+ # raise Exception(f'The provided {emotion} is not valid for this narration voice.')
+ # if VoiceSpeed.to_enum(speed) not in SPEED_OPTIONS:
+ # raise Exception(f'The provided {speed} is not valid for this narration voice.')
+ # if VoicePitch.to_enum(pitch) not in PITCH_OPTIONS:
+ # raise Exception(f'The provided {pitch} is not valid for this narration voice.')
+ # if NarrationLanguage.to_enum(language) not in LANGUAGE_OPTIONS:
+ # raise Exception(f'The provided {language} is not valid for this narration voice.')
 
- @staticmethod
- def default():
- return TortoiseNarrationVoice(
- name = TortoiseVoiceName.DEFAULT.value,
- emotion = VoiceEmotion.DEFAULT,
- speed = VoiceSpeed.DEFAULT,
- pitch = VoicePitch.DEFAULT,
- language = NarrationLanguage.DEFAULT
- )
+ # @staticmethod
+ # def default():
+ # return TortoiseNarrationVoice(
+ # name = TortoiseVoiceName.DEFAULT.value,
+ # emotion = VoiceEmotion.DEFAULT,
+ # speed = VoiceSpeed.DEFAULT,
+ # pitch = VoicePitch.DEFAULT,
+ # language = NarrationLanguage.DEFAULT
+ # )
 
- # The voices but for a specific language, to be able to
- # choose one when this is requested from the outside
- def get_narrator_names_by_language(
- language: NarrationLanguage
- ) -> list[str]:
- language = NarrationLanguage.to_enum(language)
- language = (
- NarrationLanguage.SPANISH
- if language is NarrationLanguage.DEFAULT else
- language
- )
-
- return {
- NarrationLanguage.SPANISH: [
- TortoiseVoiceName.DEFAULT.value,
- ]
- }[language]
-
- # All the remaining functionality we need to make it
- # work properly
- def narrate(
- text: str,
- voice: TortoiseNarrationVoice = TortoiseNarrationVoice.default(),
- output_filename: Union[str, None] = None
- ):
- """
- @deprecated
-
- TODO: Remove this file and method if useless. Please, read below to check.
- This method should be removed and also the file as it is only one specific
- model in TTS narration library. It is not a different system. So please,
- remove it if it won't be used.
- """
- output_filename = Output.get_filename(output_filename, FileType.AUDIO)
-
- # TODO: Delete tortoise lib?
- # TODO: Delete en/multi-datase/tortoise-v2 model
- tts = TTS("tts_models/es/multi-dataset/tortoise-v2")
-
- # Check code here: https://docs.coqui.ai/en/latest/models/tortoise.html
- tts.tts_to_file(text = text, language = voice.processed_language, file_path = output_filename)
-
- return output_filename
-
- #reference_clips = [utils.audio.load_audio(p, 22050) for p in clips_paths]
+ # # The voices but for a specific language, to be able to
+ # # choose one when this is requested from the outside
+ # def get_narrator_names_by_language(
+ # language: NarrationLanguage
+ # ) -> list[str]:
+ # language = NarrationLanguage.to_enum(language)
+ # language = (
+ # NarrationLanguage.SPANISH
+ # if language is NarrationLanguage.DEFAULT else
+ # language
+ # )
+
+ # return {
+ # NarrationLanguage.SPANISH: [
+ # TortoiseVoiceName.DEFAULT.value,
+ # ]
+ # }[language]
+
+ # # All the remaining functionality we need to make it
+ # # work properly
+ # def narrate(
+ # text: str,
+ # voice: TortoiseNarrationVoice = TortoiseNarrationVoice.default(),
+ # output_filename: Union[str, None] = None
+ # ):
+ # """
+ # @deprecated
+
+ # TODO: Remove this file and method if useless. Please, read below to check.
+ # This method should be removed and also the file as it is only one specific
+ # model in TTS narration library. It is not a different system. So please,
+ # remove it if it won't be used.
+ # """
+ # output_filename = Output.get_filename(output_filename, FileType.AUDIO)
+
+ # # TODO: Delete tortoise lib?
+ # # TODO: Delete en/multi-datase/tortoise-v2 model
+ # tts = TTS("tts_models/es/multi-dataset/tortoise-v2")
+
+ # # Check code here: https://docs.coqui.ai/en/latest/models/tortoise.html
+ # tts.tts_to_file(text = text, language = voice.processed_language, file_path = output_filename)
+
+ # return output_filename
+
+ # #reference_clips = [utils.audio.load_audio(p, 22050) for p in clips_paths]
 
- #pcm_audio = tts.tts(text)
- #pcm_audio = tts.tts_with_preset("your text here", voice_samples=reference_clips, preset='fast')
+ # #pcm_audio = tts.tts(text)
+ # #pcm_audio = tts.tts_with_preset("your text here", voice_samples=reference_clips, preset='fast')
 
- #from tortoise.utils.audio import load_audio, load_voice
+ # #from tortoise.utils.audio import load_audio, load_voice
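
In 0.0.8 the whole tortoise.py module is commented out rather than deleted, so the Tortoise path is disabled while the code stays in the wheel for reference. The removed implementation drove the maintained coqui-ai-TTS fork directly through TTS.api. Below is a minimal sketch of that call path, assuming the coqui-tts package is installed; narrate_tortoise_sketch is an illustrative name, and a plain output path stands in for the package's Output/FileType helpers.

from TTS.api import TTS

def narrate_tortoise_sketch(text: str, output_filename: str = 'narration.wav') -> str:
    # Same model id the removed narrate() loaded.
    tts = TTS("tts_models/es/multi-dataset/tortoise-v2")
    # The removed code resolved the language through
    # TortoiseNarrationVoice.processed_language, which always mapped to 'es'.
    tts.tts_to_file(text = text, language = 'es', file_path = output_filename)
    return output_filename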

yta_audio_narration/voices/ttsmp3.py
@@ -1,266 +1,6 @@
 """
- This engine has been extracted from here:
- - https://ttsmp3.com/
-
- This voice engine has just a limit of
- 3.000 characters of input when generating
- with normal voices, and 1.000 daily
- characters when using AI. AI is disabled
- by now as the limit makes it not
- interesting for our purpose.
+ TODO: Do we make this optional or not? If it
+ is optional we can allow installing the libs
+ only if using this one.
 """
- from yta_audio_narration_common.consts import DEFAULT_VOICE
- from yta_audio_narration_common.enums import NarrationLanguage, VoiceEmotion, VoiceSpeed, VoicePitch
- from yta_audio_narration_common.voice import NarrationVoice
- from yta_file_downloader import Downloader
- from yta_constants.file import FileType
- from yta_constants.enum import YTAEnum as Enum
- from yta_programming.output import Output
- from typing import Union
-
- import requests
-
-
- """
- The options below are specified even if we
- don't use them later when processing the
- voice narration. This is to keep the same
- structure for any voice narration and to
- simplify the way we offer the options in
- an API that is able to make requests.
- """
-
- # 1. The voices we accept, as Enums
- class Ttsmp3VoiceName(Enum):
- # Normal voices below:
- DEFAULT = DEFAULT_VOICE
- LUPE = 'Lupe' # US Spanish
- PENELOPE = 'Penelope' # US Spanish
- MIGUEL = 'Miguel' # US Spanish
- # TODO: There are more voices for the different
- # languages, so jus copy the names here and you
- # will be able to use them
- # AI voices below:
- # ALLOY = 'alloy' # female
- # ECHO = 'echo' # male
- # FABLE = 'fable' # male
- # ONYX = 'onyx' # male (deeper voice)
- # NOVA = 'nova' # female (soft)
- # SHIMMER = 'shimmer' # female
-
- # 2. The languages we accept
- LANGUAGE_OPTIONS = [
- NarrationLanguage.DEFAULT
- ]
-
- # 3. The emotions we accept
- EMOTION_OPTIONS = [
- VoiceEmotion.DEFAULT,
- VoiceEmotion.NORMAL,
- ]
-
- # 4. The speeds we accept
- SPEED_OPTIONS = [
- VoiceSpeed.DEFAULT,
- VoiceSpeed.NORMAL,
- ]
-
- # 5. The pitches we accept
- PITCH_OPTIONS = [
- VoicePitch.DEFAULT,
- VoicePitch.NORMAL,
- ]
-
- class Ttsmp3NarrationVoice(NarrationVoice):
- """
- Voice instance to be used when narrating with
- Ttsmp3 engine.
- """
-
- @property
- def processed_name(
- self
- ) -> str:
- """
- Get the usable name value from the one that has
- been set when instantiating the instance.
- """
- return (
- Ttsmp3VoiceName.MIGUEL.value
- if Ttsmp3VoiceName.to_enum(self.name) == Ttsmp3VoiceName.DEFAULT else
- Ttsmp3VoiceName.to_enum(self.name).value
- )
-
- @property
- def processed_emotion(
- self
- ) -> str:
- """
- Get the usable emotion value from the one that
- has been set when instantiating the instance.
- """
- # This narration is not able to handle any
- # emotion (at least by now)
- return None
-
- @property
- def processed_speed(
- self
- ) -> int:
- """
- Get the usable speed value from the one that
- has been set when instantiating the instance.
- """
- # This is not used here
- return None
-
- @property
- def processed_pitch(
- self
- ) -> int:
- """
- Get the usable pitch value from the one that
- has been set when instantiating the instance.
- """
- # This is not used here
- return None
-
- @property
- def processed_language(
- self
- ) -> str:
- """
- Get the usable language value from the one that
- has been set when instantiating the instance.
- """
- # This engine has the language set in the voice
- # names so you should select a voice name that
- # is specific of the language you need
- return None
-
- def validate_and_process(
- self,
- name: str,
- emotion: VoiceEmotion,
- speed: VoiceSpeed,
- pitch: VoicePitch,
- language: NarrationLanguage
- ):
- Ttsmp3VoiceName.to_enum(name)
- if VoiceEmotion.to_enum(emotion) not in EMOTION_OPTIONS:
- raise Exception(f'The provided {emotion} is not valid for this narration voice.')
- if VoiceSpeed.to_enum(speed) not in SPEED_OPTIONS:
- raise Exception(f'The provided {speed} is not valid for this narration voice.')
- if VoicePitch.to_enum(pitch) not in PITCH_OPTIONS:
- raise Exception(f'The provided {pitch} is not valid for this narration voice.')
- if NarrationLanguage.to_enum(language) not in LANGUAGE_OPTIONS:
- raise Exception(f'The provided {language} is not valid for this narration voice.')
-
- @staticmethod
- def default():
- return Ttsmp3NarrationVoice(
- name = Ttsmp3VoiceName.DEFAULT.value,
- emotion = VoiceEmotion.DEFAULT,
- speed = VoiceSpeed.DEFAULT,
- pitch = VoicePitch.DEFAULT,
- language = NarrationLanguage.DEFAULT
- )
-
- # The voices but for a specific language, to be able to
- # choose one when this is requested from the outside
- def get_narrator_names_by_language(
- language: NarrationLanguage
- ) -> list[str]:
- language = NarrationLanguage.to_enum(language)
- language = (
- NarrationLanguage.SPANISH
- if language is NarrationLanguage.DEFAULT else
- language
- )
-
- return {
- NarrationLanguage.SPANISH: [
- Ttsmp3VoiceName.DEFAULT.value,
- Ttsmp3VoiceName.LUPE.value,
- Ttsmp3VoiceName.MIGUEL.value,
- Ttsmp3VoiceName.PENELOPE.value
- ]
- }[language]
-
- # All the remaining functionality we need to make it
- # work properly
- # TODO: Check this because I don't know if this webpage is using the tts (coqui)
- # library as the generator engine. If that, I have this engine in 'coqui.py' file
- # so I don't need this (that is not stable because is based in http requests)
- def narrate_tts3(
- text: str,
- voice: Ttsmp3NarrationVoice = Ttsmp3NarrationVoice.default(),
- output_filename: Union[str, None] = None
- ) -> str:
- """
- This makes a narration based on an external platform. You
- can change some voice configuration in code to make the
- voice different.
-
- Aparrently not limited. Check, because it has time breaks
- and that stuff to enhance the narration.
- """
- # From here: https://ttsmp3.com/
- headers = {
- 'accept': '*/*',
- 'accept-language': 'es-ES,es;q=0.9',
- 'content-type': 'application/x-www-form-urlencoded',
- 'origin': 'https://ttsmp3.com',
- 'referer': 'https://ttsmp3.com/',
- 'sec-ch-ua': '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
- }
-
- data = {
- 'msg': text,
- 'lang': voice.processed_name,
- 'source': 'ttsmp3',
- }
-
- """
- There is an AI voices version but it has a
- daily limit of only 1.000 characters so it
- is not interesting, that is why I leave the
- code but commented.
-
- The way to request AI voice narrations is
- the same, but using the AI url and the AI
- voices names instead of the normal ones.
- """
- # AI_VERSION_HEADERS = {
- # 'accept': '*/*',
- # 'accept-language': 'es-ES,es;q=0.9',
- # 'content-type': 'application/x-www-form-urlencoded',
- # 'origin': 'https://ttsmp3.com',
- # 'priority': 'u=1, i',
- # 'referer': 'https://ttsmp3.com/ai',
- # 'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
- # 'sec-ch-ua-mobile': '?0',
- # 'sec-ch-ua-platform': '"Windows"',
- # 'sec-fetch-dest': 'empty',
- # 'sec-fetch-mode': 'cors',
- # 'sec-fetch-site': 'same-origin',
- # 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
- # }
- # AI_VERSION_URL = 'https:ttsmp3.com/makemp3_ai.php'
-
- response = requests.post('https://ttsmp3.com/makemp3_new.php', headers = headers, data = data)
- response = response.json()
- url = response['URL']
-
- output_filename = Output.get_filename(output_filename, FileType.AUDIO)
-
- # This is one example of a valid url we receive
- # as response:
- # https://ttsmp3.com/created_mp3/8b38a5f2d4664e98c9757eb6db93b914.mp3
- return Downloader.download_audio(url, output_filename).filename
+ from yta_audio_narration_ttsmp3 import Ttsmp3VoiceName, LANGUAGE_OPTIONS, EMOTION_OPTIONS, SPEED_OPTIONS, PITCH_OPTIONS, Ttsmp3NarrationVoice, get_narrator_names_by_language, narrate_tts3
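
With this change the ttsmp3 implementation (the HTTP calls against ttsmp3.com) moves out of this wheel into the new yta_audio_narration_ttsmp3 dependency, and the module shrinks to a re-export so that imports from yta_audio_narration.voices.ttsmp3 keep resolving. A usage sketch, assuming the re-exported API keeps the removed signatures and that narrate_tts3 still returns the path of the downloaded MP3:

from yta_audio_narration.voices.ttsmp3 import Ttsmp3NarrationVoice, narrate_tts3

# Narrate with the default voice (mapped to 'Miguel', US Spanish, in the
# removed code) and keep the generated file.
filename = narrate_tts3(
    'Hola, esto es una prueba.',
    voice = Ttsmp3NarrationVoice.default(),
    output_filename = 'narracion.mp3'
)
print(filename)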

yta_audio_narration-0.0.7.dist-info/METADATA → yta_audio_narration-0.0.8.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: yta-audio-narration
- Version: 0.0.7
+ Version: 0.0.8
 Summary: Youtube Autonomous Audio Narration Module.
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
@@ -14,6 +14,7 @@ Requires-Dist: yta_audio_narration_microsoft (>=0.0.1,<1.0.0)
 Requires-Dist: yta_audio_narration_open_voice (>=0.0.1,<1.0.0)
 Requires-Dist: yta_audio_narration_tetyys (>=0.0.1,<1.0.0)
 Requires-Dist: yta_audio_narration_tiktok (>=0.0.1,<1.0.0)
+ Requires-Dist: yta_audio_narration_ttsmp3 (>=0.0.1,<1.0.0)
 Requires-Dist: yta_constants (>=0.0.1,<1.0.0)
 Requires-Dist: yta_file (>=0.0.1,<1.0.0)
 Requires-Dist: yta_file_downloader (>=0.0.1,<1.0.0)

yta_audio_narration-0.0.7.dist-info/RECORD → yta_audio_narration-0.0.8.dist-info/RECORD
@@ -8,9 +8,9 @@ yta_audio_narration/voices/microsoft.py,sha256=DnMyc2C5Zy2VEpYK2Xljsrhx8QibUPlIr
 yta_audio_narration/voices/open_voice.py,sha256=zU4LmXVAHgF3YCMrM--55RM1UGQX7kn9Y2137wCNu1Y,347
 yta_audio_narration/voices/tetyys.py,sha256=CFVpUJLA_SK7_wMBnhl4mlPNgI-VKvxXy0aX7MvxoxE,316
 yta_audio_narration/voices/tiktok.py,sha256=hX-u3Aqlu93IxahnrWGoGYIPJCPelsv7lxaxjoX1blE,316
- yta_audio_narration/voices/tortoise.py,sha256=qtL7Hl2f2bSjw2G81Ui-lTV8DZIcrJrKClkY3ulkf3I,6576
- yta_audio_narration/voices/ttsmp3.py,sha256=Zl3w4uY9n93RlpQv8c_1w22KZlb5BzHQRAqsheu5Gbo,8799
- yta_audio_narration-0.0.7.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
- yta_audio_narration-0.0.7.dist-info/METADATA,sha256=HUUMPaEtFaK_OUZytbBwmFfcNv0I6zPKbfFdMCNXU74,1176
- yta_audio_narration-0.0.7.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
- yta_audio_narration-0.0.7.dist-info/RECORD,,
+ yta_audio_narration/voices/tortoise.py,sha256=i-UVNDBqrete4Eb82Sh_G8vLgmsshsyKkqxVAPvNHtE,7039
+ yta_audio_narration/voices/ttsmp3.py,sha256=zwZftMuM_lziR9DBvaWC7m8QZYsF0i5H71BQzkKA7Sk,314
+ yta_audio_narration-0.0.8.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+ yta_audio_narration-0.0.8.dist-info/METADATA,sha256=QB2Ku-71irI3ctdISBWE1PDVAP7LCIaSUoXDvAEQRQw,1235
+ yta_audio_narration-0.0.8.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+ yta_audio_narration-0.0.8.dist-info/RECORD,,