webscout 5.1-py3-none-any.whl → 5.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +83 -277
- webscout/AIbase.py +106 -4
- webscout/AIutel.py +31 -0
- webscout/Agents/Onlinesearcher.py +91 -104
- webscout/Agents/__init__.py +2 -1
- webscout/Agents/ai.py +186 -0
- webscout/Agents/functioncall.py +57 -27
- webscout/Bing_search.py +73 -43
- webscout/Local/_version.py +1 -1
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/Cloudflare.py +0 -4
- webscout/Provider/EDITEE.py +215 -0
- webscout/Provider/NetFly.py +256 -0
- webscout/Provider/TTI/PollinationsAI.py +138 -0
- webscout/Provider/TTI/__init__.py +2 -0
- webscout/Provider/TTI/deepinfra.py +148 -0
- webscout/Provider/TTS/__init__.py +2 -0
- webscout/Provider/TTS/streamElements.py +296 -0
- webscout/Provider/TTS/voicepod.py +114 -0
- webscout/Provider/TeachAnything.py +177 -0
- webscout/Provider/__init__.py +8 -0
- webscout/__init__.py +2 -0
- webscout/version.py +1 -1
- {webscout-5.1.dist-info → webscout-5.2.dist-info}/METADATA +32 -12
- {webscout-5.1.dist-info → webscout-5.2.dist-info}/RECORD +29 -19
- webscout/async_providers.py +0 -21
- {webscout-5.1.dist-info → webscout-5.2.dist-info}/LICENSE.md +0 -0
- {webscout-5.1.dist-info → webscout-5.2.dist-info}/WHEEL +0 -0
- {webscout-5.1.dist-info → webscout-5.2.dist-info}/entry_points.txt +0 -0
- {webscout-5.1.dist-info → webscout-5.2.dist-info}/top_level.txt +0 -0

webscout/Provider/TTS/streamElements.py
NEW
@@ -0,0 +1,296 @@
+import time
+import pygame
+import requests
+import pathlib
+import urllib.parse
+from typing import Union, Generator
+
+from webscout import exceptions
+from webscout.AIbase import TTSProvider
+
+class StreamElements(TTSProvider):
+    """
+    Text-to-speech provider using the StreamElements API.
+    """
+
+    # Request headers
+    headers: dict[str, str] = {
+        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
+    }
+    cache_dir = pathlib.Path("./audio_cache")
+    all_voices: list[str] = [
+        "Filiz",
+        "Astrid",
+        "Tatyana",
+        "Maxim",
+        "Carmen",
+        "Ines",
+        "Cristiano",
+        "Vitoria",
+        "Ricardo",
+        "Maja",
+        "Jan",
+        "Jacek",
+        "Ewa",
+        "Ruben",
+        "Lotte",
+        "Liv",
+        "Seoyeon",
+        "Takumi",
+        "Mizuki",
+        "Giorgio",
+        "Carla",
+        "Bianca",
+        "Karl",
+        "Dora",
+        "Mathieu",
+        "Celine",
+        "Chantal",
+        "Penelope",
+        "Miguel",
+        "Mia",
+        "Enrique",
+        "Conchita",
+        "Geraint",
+        "Salli",
+        "Matthew",
+        "Kimberly",
+        "Kendra",
+        "Justin",
+        "Joey",
+        "Joanna",
+        "Ivy",
+        "Raveena",
+        "Aditi",
+        "Emma",
+        "Brian",
+        "Amy",
+        "Russell",
+        "Nicole",
+        "Vicki",
+        "Marlene",
+        "Hans",
+        "Naja",
+        "Mads",
+        "Gwyneth",
+        "Zhiyu",
+        "es-ES-Standard-A",
+        "it-IT-Standard-A",
+        "it-IT-Wavenet-A",
+        "ja-JP-Standard-A",
+        "ja-JP-Wavenet-A",
+        "ko-KR-Standard-A",
+        "ko-KR-Wavenet-A",
+        "pt-BR-Standard-A",
+        "tr-TR-Standard-A",
+        "sv-SE-Standard-A",
+        "nl-NL-Standard-A",
+        "nl-NL-Wavenet-A",
+        "en-US-Wavenet-A",
+        "en-US-Wavenet-B",
+        "en-US-Wavenet-C",
+        "en-US-Wavenet-D",
+        "en-US-Wavenet-E",
+        "en-US-Wavenet-F",
+        "en-GB-Standard-A",
+        "en-GB-Standard-B",
+        "en-GB-Standard-C",
+        "en-GB-Standard-D",
+        "en-GB-Wavenet-A",
+        "en-GB-Wavenet-B",
+        "en-GB-Wavenet-C",
+        "en-GB-Wavenet-D",
+        "en-US-Standard-B",
+        "en-US-Standard-C",
+        "en-US-Standard-D",
+        "en-US-Standard-E",
+        "de-DE-Standard-A",
+        "de-DE-Standard-B",
+        "de-DE-Wavenet-A",
+        "de-DE-Wavenet-B",
+        "de-DE-Wavenet-C",
+        "de-DE-Wavenet-D",
+        "en-AU-Standard-A",
+        "en-AU-Standard-B",
+        "en-AU-Wavenet-A",
+        "en-AU-Wavenet-B",
+        "en-AU-Wavenet-C",
+        "en-AU-Wavenet-D",
+        "en-AU-Standard-C",
+        "en-AU-Standard-D",
+        "fr-CA-Standard-A",
+        "fr-CA-Standard-B",
+        "fr-CA-Standard-C",
+        "fr-CA-Standard-D",
+        "fr-FR-Standard-C",
+        "fr-FR-Standard-D",
+        "fr-FR-Wavenet-A",
+        "fr-FR-Wavenet-B",
+        "fr-FR-Wavenet-C",
+        "fr-FR-Wavenet-D",
+        "da-DK-Wavenet-A",
+        "pl-PL-Wavenet-A",
+        "pl-PL-Wavenet-B",
+        "pl-PL-Wavenet-C",
+        "pl-PL-Wavenet-D",
+        "pt-PT-Wavenet-A",
+        "pt-PT-Wavenet-B",
+        "pt-PT-Wavenet-C",
+        "pt-PT-Wavenet-D",
+        "ru-RU-Wavenet-A",
+        "ru-RU-Wavenet-B",
+        "ru-RU-Wavenet-C",
+        "ru-RU-Wavenet-D",
+        "sk-SK-Wavenet-A",
+        "tr-TR-Wavenet-A",
+        "tr-TR-Wavenet-B",
+        "tr-TR-Wavenet-C",
+        "tr-TR-Wavenet-D",
+        "tr-TR-Wavenet-E",
+        "uk-UA-Wavenet-A",
+        "ar-XA-Wavenet-A",
+        "ar-XA-Wavenet-B",
+        "ar-XA-Wavenet-C",
+        "cs-CZ-Wavenet-A",
+        "nl-NL-Wavenet-B",
+        "nl-NL-Wavenet-C",
+        "nl-NL-Wavenet-D",
+        "nl-NL-Wavenet-E",
+        "en-IN-Wavenet-A",
+        "en-IN-Wavenet-B",
+        "en-IN-Wavenet-C",
+        "fil-PH-Wavenet-A",
+        "fi-FI-Wavenet-A",
+        "el-GR-Wavenet-A",
+        "hi-IN-Wavenet-A",
+        "hi-IN-Wavenet-B",
+        "hi-IN-Wavenet-C",
+        "hu-HU-Wavenet-A",
+        "id-ID-Wavenet-A",
+        "id-ID-Wavenet-B",
+        "id-ID-Wavenet-C",
+        "it-IT-Wavenet-B",
+        "it-IT-Wavenet-C",
+        "it-IT-Wavenet-D",
+        "ja-JP-Wavenet-B",
+        "ja-JP-Wavenet-C",
+        "ja-JP-Wavenet-D",
+        "cmn-CN-Wavenet-A",
+        "cmn-CN-Wavenet-B",
+        "cmn-CN-Wavenet-C",
+        "cmn-CN-Wavenet-D",
+        "nb-no-Wavenet-E",
+        "nb-no-Wavenet-A",
+        "nb-no-Wavenet-B",
+        "nb-no-Wavenet-C",
+        "nb-no-Wavenet-D",
+        "vi-VN-Wavenet-A",
+        "vi-VN-Wavenet-B",
+        "vi-VN-Wavenet-C",
+        "vi-VN-Wavenet-D",
+        "sr-rs-Standard-A",
+        "lv-lv-Standard-A",
+        "is-is-Standard-A",
+        "bg-bg-Standard-A",
+        "af-ZA-Standard-A",
+        "Tracy",
+        "Danny",
+        "Huihui",
+        "Yaoyao",
+        "Kangkang",
+        "HanHan",
+        "Zhiwei",
+        "Asaf",
+        "An",
+        "Stefanos",
+        "Filip",
+        "Ivan",
+        "Heidi",
+        "Herena",
+        "Kalpana",
+        "Hemant",
+        "Matej",
+        "Andika",
+        "Rizwan",
+        "Lado",
+        "Valluvar",
+        "Linda",
+        "Heather",
+        "Sean",
+        "Michael",
+        "Karsten",
+        "Guillaume",
+        "Pattara",
+        "Jakub",
+        "Szabolcs",
+        "Hoda",
+        "Naayf",
+    ]
+
+    def __init__(self, timeout: int = 20, proxies: dict = None):
+        """Initializes the StreamElements TTS client."""
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies.update(proxies)
+        self.timeout = timeout
+
+    def tts(self, text: str, voice: str = "Brian") -> str:
+        """
+        Converts text to speech using the StreamElements API and saves it to a file.
+        """
+        assert (
+            voice in self.all_voices
+        ), f"Voice '{voice}' not one of [{', '.join(self.all_voices)}]"
+
+        url = f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={{{urllib.parse.quote(text)}}}"
+        filename = self.cache_dir / f"{int(time.time())}.mp3"
+
+        try:
+            response = self.session.get(url=url, headers=self.headers, stream=True, timeout=self.timeout)
+            response.raise_for_status()
+
+            # Create the audio_cache directory if it doesn't exist
+            self.cache_dir.mkdir(parents=True, exist_ok=True)
+
+            with open(filename, "wb") as f:
+                for chunk in response.iter_content(chunk_size=512):
+                    if chunk:
+                        f.write(chunk)
+
+            return filename.as_posix()
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to perform the operation: {e}"
+            )
+
+    def play_audio(self, filename: str):
+        """
+        Plays an audio file using pygame.
+
+        Args:
+            filename (str): The path to the audio file.
+
+        Raises:
+            RuntimeError: If there is an error playing the audio.
+        """
+        try:
+            pygame.mixer.init()
+            pygame.mixer.music.load(filename)
+            pygame.mixer.music.play()
+            while pygame.mixer.music.get_busy():
+                pygame.time.Clock().tick(10)
+        except Exception as e:
+            raise RuntimeError(f"Error playing audio: {e}")
+
+# Example usage
+if __name__ == "__main__":
+    streamelements = StreamElements()
+    text = "This is a test of the StreamElements text-to-speech API."
+
+    print("Generating audio...")
+    audio_file = streamelements.tts(text, voice="Brian")
+
+    print("Playing audio...")
+    streamelements.play_audio(audio_file)
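
Usage sketch (illustrative, not part of the diff): the new StreamElements provider exposes its supported voices through the all_voices list and raises webscout.exceptions.FailedToGenerateResponseError on request failures, so a caller can pick a voice and handle errors roughly as follows. The voice name below is one entry from all_voices; the text is arbitrary.

from webscout import exceptions
from webscout.Provider.TTS.streamElements import StreamElements

tts = StreamElements(timeout=30)      # per-request timeout in seconds
print(len(tts.all_voices))            # number of supported voice names

try:
    # "en-GB-Wavenet-B" is one of the entries in all_voices above
    audio_path = tts.tts("Release 5.2 adds new TTS providers.", voice="en-GB-Wavenet-B")
    tts.play_audio(audio_path)        # blocks until pygame playback finishes
except exceptions.FailedToGenerateResponseError as err:
    print(f"TTS request failed: {err}")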

webscout/Provider/TTS/voicepod.py
NEW
@@ -0,0 +1,114 @@
+import requests
+import json
+import pygame
+import time
+from pathlib import Path
+from typing import Generator
+
+from webscout import exceptions
+from webscout.AIbase import TTSProvider
+
+class Voicepods(TTSProvider):
+    """
+    A class to interact with the Voicepods text-to-speech API.
+    """
+
+    def __init__(self, timeout: int = 20, proxies: dict = None):
+        """
+        Initializes the Voicepods API client.
+        """
+        self.api_endpoint = "https://voicepods-stream.vercel.app/api/resemble"
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://voicepods-stream.vercel.app',
+            'Referer': 'https://voicepods-stream.vercel.app/',
+            'Sec-CH-UA': '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            'Sec-CH-UA-Mobile': '?0',
+            'Sec-CH-UA-Platform': '"Windows"',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0',
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.audio_cache_dir = Path("./audio_cache")
+
+    def tts(self, text: str) -> str:
+        """
+        Converts text to speech using the Voicepods API.
+
+        Args:
+            text (str): The text to be converted to speech.
+
+        Returns:
+            str: The filename of the saved audio file.
+
+        Raises:
+            exceptions.FailedToGenerateResponseError: If there is an error generating or saving the audio.
+        """
+        payload = json.dumps({"query": text})
+        filename = self.audio_cache_dir / f"{int(time.time())}.wav"  # Using timestamp for filename
+
+        try:
+            response = self.session.post(self.api_endpoint, data=payload, timeout=self.timeout)
+            response.raise_for_status()
+
+            content_type = response.headers.get('Content-Type', '')
+            if 'audio' not in content_type.lower():
+                raise ValueError(f"Unexpected content type: {content_type}")
+
+            audio_data = response.content
+            self._save_audio(audio_data, filename)
+            return filename.as_posix()  # Return the filename as a string
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error generating audio: {e}")
+
+    def _save_audio(self, audio_data: bytes, filename: Path):
+        """Saves the audio data to a WAV file in the audio cache directory."""
+        try:
+            # Create the audio_cache directory if it doesn't exist
+            self.audio_cache_dir.mkdir(parents=True, exist_ok=True)
+
+            riff_start = audio_data.find(b'RIFF')
+            if riff_start == -1:
+                raise ValueError("RIFF header not found in audio data")
+
+            trimmed_audio_data = audio_data[riff_start:]
+
+            with open(filename, "wb") as f:
+                f.write(trimmed_audio_data)
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error saving audio: {e}")
+
+    def play_audio(self, filename: str):
+        """Plays the audio file using pygame."""
+        try:
+            pygame.mixer.init()
+            pygame.mixer.music.load(filename)
+            pygame.mixer.music.play()
+            while pygame.mixer.music.get_busy():
+                pygame.time.Clock().tick(10)
+        except Exception as e:
+            raise RuntimeError(f"Error playing audio: {e}")
+
+# Example usage
+if __name__ == "__main__":
+
+    voicepods = Voicepods()
+    text = "Hello, this is a test of the Voicepods text-to-speech system."
+
+    print("Generating audio...")
+    audio_file = voicepods.tts(text)
+
+    print("Playing audio...")
+    voicepods.play_audio(audio_file)
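
Usage sketch (illustrative, not part of the diff): Voicepods takes the same timeout/proxies constructor arguments as StreamElements, discards any bytes before the RIFF marker when saving the WAV file (see _save_audio above), and raises FailedToGenerateResponseError on request or save failures. The proxy URL below is a placeholder.

from webscout import exceptions
from webscout.Provider.TTS.voicepod import Voicepods

# proxies is optional; this URL is a placeholder, not a real endpoint
voicepods = Voicepods(timeout=30, proxies={"https": "http://127.0.0.1:8080"})

try:
    wav_path = voicepods.tts("Testing the Voicepods provider added in 5.2.")
    print(f"Saved trimmed WAV to {wav_path}")
    voicepods.play_audio(wav_path)
except exceptions.FailedToGenerateResponseError as err:
    print(f"Voicepods request failed: {err}")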

webscout/Provider/TeachAnything.py
NEW
@@ -0,0 +1,177 @@
+import requests
+from requests.exceptions import RequestException
+from typing import Any, Dict
+import logging
+import random
+
+from webscout.AIutel import Conversation, Optimizers
+
+class TeachAnything:
+    """
+    A class to interact with the Teach-Anything API.
+    """
+
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ) -> None:
+        """
+        Initializes the Teach-Anything API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): AI model to use for text generation. Defaults to "gpt4".
+        """
+
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.teach-anything.com/api/generate"
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "authority": "www.teach-anything.com",
+            "path": "/api/generate",
+            "scheme": "https",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "origin": "https://www.teach-anything.com",
+            "referer": "https://www.teach-anything.com/",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+        }
+        self.session.headers.update(self.headers)
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+        Returns:
+            The response from the API.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "prompt": conversation_prompt
+        }
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
+            if not response.ok:
+                raise RequestException(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            resp = response.text
+            self.last_response.update(dict(text=resp))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return self.last_response
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == '__main__':
+    from rich import print
+    ai = TeachAnything()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
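
Usage sketch (illustrative, not part of the diff): ask() posts the prompt to the teach-anything.com endpoint and stores the raw response text in a dict under the "text" key; get_message() extracts that string and chat() combines the two, so the minimal non-streaming flow looks like this.

from webscout.Provider.TeachAnything import TeachAnything

ai = TeachAnything(timeout=60)

# ask() returns the last_response dict, e.g. {"text": "..."}
raw = ai.ask("Explain what a Python wheel file is in one sentence.")
print(raw["text"])

# chat() wraps ask() and returns only the message string
print(ai.chat("What does py3-none-any mean in a wheel filename?"))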
webscout/Provider/__init__.py
CHANGED

@@ -43,6 +43,10 @@ from .Youchat import *
 from .yep import *
 from .Cloudflare import *
 from .turboseek import *
+from .NetFly import *
+from .EDITEE import *
+from .TeachAnything import *
+from .AI21 import *
 __all__ = [
     'ThinkAnyAI',
     'Farfalle',
@@ -89,4 +93,8 @@ __all__ = [
     'YEPCHAT',
     'Cloudflare',
     'TurboSeek',
+    'NetFly',
+    'Editee',
+    'TeachAnything',
+    'AI21',
 ]
webscout/__init__.py
CHANGED

@@ -11,6 +11,8 @@ from .Bing_search import *
 import g4f
 from .YTdownloader import *
 from .Provider import *
+from .Provider.TTI import *
+from .Provider.TTS import *
 from .Extra import gguf
 from .Extra import autollama
 from .Extra import weather_ascii, weather
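
The two added star-imports pull the TTI and TTS subpackages into the top-level webscout namespace. A hedged sketch of what that enables, assuming Provider/TTS/__init__.py re-exports the new classes (its two added lines are not shown in this diff); the fully qualified module paths used earlier work regardless.

import webscout

# Assumes webscout/Provider/TTS/__init__.py re-exports StreamElements;
# otherwise import it from webscout.Provider.TTS.streamElements instead.
tts = webscout.StreamElements()
audio_file = tts.tts("Top-level import test.", voice="Brian")
tts.play_audio(audio_file)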
webscout/version.py
CHANGED

@@ -1,2 +1,2 @@
-__version__ = "5.1"
+__version__ = "5.2"
 __prog__ = "webscout"