webscout-5.8-py3-none-any.whl → webscout-6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Provider/Amigo.py +267 -0
- webscout/Provider/ChatHub.py +209 -0
- webscout/Provider/Chatify.py +3 -3
- webscout/Provider/Cloudflare.py +3 -3
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/Deepinfra.py +95 -389
- webscout/Provider/Deepseek.py +4 -6
- webscout/Provider/DiscordRocks.py +3 -3
- webscout/Provider/Free2GPT.py +3 -3
- webscout/Provider/OLLAMA.py +4 -4
- webscout/Provider/RUBIKSAI.py +3 -3
- webscout/Provider/TTI/WebSimAI.py +142 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/amigo.py +148 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/parler.py +108 -0
- webscout/Provider/Youchat.py +4 -5
- webscout/Provider/__init__.py +10 -5
- webscout/Provider/ai4chat.py +3 -2
- webscout/Provider/bagoodex.py +145 -0
- webscout/Provider/bixin.py +3 -3
- webscout/Provider/cleeai.py +3 -3
- webscout/Provider/elmo.py +2 -5
- webscout/Provider/julius.py +6 -40
- webscout/Provider/learnfastai.py +253 -0
- webscout/Provider/llamatutor.py +2 -2
- webscout/Provider/prefind.py +232 -0
- webscout/Provider/promptrefine.py +3 -3
- webscout/Provider/turboseek.py +1 -1
- webscout/Provider/twitterclone.py +25 -41
- webscout/Provider/upstage.py +3 -3
- webscout/Provider/x0gpt.py +6 -6
- webscout/version.py +1 -1
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/METADATA +187 -121
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/RECORD +39 -32
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/WHEEL +1 -1
- webscout/Provider/Poe.py +0 -208
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/LICENSE.md +0 -0
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/entry_points.txt +0 -0
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/top_level.txt +0 -0
webscout/Provider/TTI/WebSimAI.py
ADDED
@@ -0,0 +1,142 @@
+import requests
+import os
+from typing import List
+
+from webscout.AIbase import ImageProvider
+
+class WebSimAI(ImageProvider):
+    """
+    Image provider for WebSim.ai.
+    """
+
+    def __init__(self, timeout: int = 60, proxies: dict = {}):
+        """Initializes the WebSimAI class.
+
+        Args:
+            timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
+            proxies (dict, optional): HTTP request proxies (socks). Defaults to {}.
+        """
+        self.url = "https://websim.ai/api/image_gen"
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Content-Type": "application/json",
+            "User-Agent": (
+                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                "AppleWebKit/537.36 (KHTML, like Gecko) "
+                "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
+            ),
+            "Origin": "https://websim.ai",
+            "Referer": "https://websim.ai/p/a5yvwmtj8qz6ayx4tlg1"
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.prompt: str = "AI-generated image - Webscout"
+        self.image_extension: str = "png"
+
+    def generate(
+        self,
+        prompt: str,
+        amount: int = 1,
+        width: int = 1024,
+        height: int = 756
+    ) -> List[bytes]:
+        """Generate image from prompt
+
+        Args:
+            prompt (str): Image description.
+            amount (int, optional): Total images to be generated. Defaults to 1.
+            width (int, optional): Width of the generated image. Defaults to 1024.
+            height (int, optional): Height of the generated image. Defaults to 756.
+
+        Returns:
+            List[bytes]: List of generated images as bytes.
+        """
+        assert bool(prompt), "Prompt cannot be null"
+        assert isinstance(amount, int), f"Amount should be an integer only, not {type(amount)}"
+        assert amount > 0, "Amount should be greater than 0"
+
+        self.prompt = prompt
+        response = []
+
+        for _ in range(amount):
+            payload = {
+                "prompt": prompt,
+                "width": width,
+                "height": height,
+                "site_id": "KcWvHOHNBP2PmWUYZ",
+            }
+
+            try:
+                resp = self.session.post(self.url, headers=self.headers, json=payload, timeout=self.timeout)
+                resp.raise_for_status()  # Raises HTTPError for bad responses
+
+                response_data = resp.json()
+                image_url = response_data.get("url")
+                if not image_url:
+                    print("No image URL found in the response.")
+                    continue
+
+                image_response = requests.get(image_url)
+                image_response.raise_for_status()
+                response.append(image_response.content)
+
+            except requests.exceptions.HTTPError as http_err:
+                print(f"HTTP error occurred: {http_err} - {resp.text}")
+                return []  # Return an empty list on error
+            except requests.exceptions.RequestException as req_err:
+                print(f"Request error occurred: {req_err}")
+                return []  # Return an empty list on error
+
+        return response
+
+    def save(
+        self,
+        response: List[bytes],
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Save generated images
+
+        Args:
+            response (List[bytes]): List of generated images as bytes.
+            name (str): Filename for the images. Defaults to the last prompt.
+            dir (str, optional): Directory for saving images. Defaults to os.getcwd().
+            filenames_prefix (str, optional): String to be prefixed at each filename to be returned.
+
+        Returns:
+            List[str]: List of saved filenames.
+        """
+        assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
+        name = self.prompt if name is None else name
+
+        filenames = []
+        count = 0
+        for image in response:
+            def complete_path():
+                count_value = "" if count == 0 else f"_{count}"
+                return os.path.join(dir, name + count_value + "." + self.image_extension)
+
+            while os.path.isfile(complete_path()):
+                count += 1
+
+            absolute_path_to_file = complete_path()
+            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
+
+            with open(absolute_path_to_file, "wb") as fh:
+                fh.write(image)
+
+        return filenames
+
+
+if __name__ == "__main__":
+    bot = WebSimAI()
+    try:
+        resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
+        print(bot.save(resp))
+    except Exception as e:
+        print(f"An error occurred: {e}")
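
For orientation, a minimal usage sketch of the WebSimAI class added above; the import path simply mirrors the new file's location, and the prompt, file name, and directory are illustrative only:

from webscout.Provider.TTI.WebSimAI import WebSimAI

imager = WebSimAI(timeout=120)
# generate() downloads each image itself and returns raw bytes
images = imager.generate("a watercolor lighthouse at dusk", amount=2)
# save() writes the bytes to disk, appending _1, _2, ... when a name already exists
print(imager.save(images, name="lighthouse", dir="."))
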
webscout/Provider/TTI/amigo.py
ADDED
@@ -0,0 +1,148 @@
+import requests
+import json
+import uuid
+import os
+from typing import List
+
+from webscout.AIbase import ImageProvider
+
+class AmigoImager(ImageProvider):
+    """
+    Image provider for AmigoChat.io.
+    """
+    AVAILABLE_MODELS = ["dalle-e-3", "flux-pro", "flux-realism"]
+    def __init__(self, timeout: int = 60, proxies: dict = {}):
+        """Initializes the AmigoImager class.
+
+        Args:
+            timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
+            proxies (dict, optional): HTTP request proxies. Defaults to {}.
+        """
+        self.url = "https://api.amigochat.io/v1/images/generations"
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Authorization": "Bearer ",  # Empty
+            "Content-Type": "application/json; charset=utf-8",
+            "DNT": "1",
+            "Origin": "https://amigochat.io",
+            "Referer": "https://amigochat.io/",
+            "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
+            "X-Device-Language": "en-US",
+            "X-Device-Platform": "web",
+            "X-Device-UUID": str(uuid.uuid4()),
+            "X-Device-Version": "1.0.22"
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.prompt: str = "AI-generated image - webscout"
+        self.image_extension: str = "png"
+
+    def generate(self, prompt: str, amount: int = 1, model: str = "flux-pro") -> List[str]:
+        """Generate image from prompt
+
+        Args:
+            prompt (str): Image description.
+            amount (int, optional): Total images to be generated. Defaults to 1.
+            model (str, optional): Model to use for generating images. Defaults to "flux-pro".
+
+        Returns:
+            List[str]: List of generated image URLs.
+        """
+        assert bool(prompt), "Prompt cannot be null"
+        assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
+        assert amount > 0, "Amount should be greater than 0"
+        assert model in self.AVAILABLE_MODELS, f"Model should be one of {self.AVAILABLE_MODELS}"
+
+        self.prompt = prompt
+        response = []
+
+        for _ in range(amount):
+            # JSON payload for the request
+            payload = {
+                "prompt": prompt,
+                "model": model,
+                "personaId": "image-generator"
+            }
+
+            try:
+                # Sending the POST request
+                resp = requests.post(self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout)
+                resp.raise_for_status()
+
+                # Process the response
+                response_data = resp.json()
+                image_url = response_data['data'][0]['url']
+                response.append(image_url)
+
+            except requests.exceptions.RequestException as e:
+                print(f"An error occurred: {e}")
+                raise
+
+        return response
+
+    def save(
+        self,
+        response: List[str],  # List of image URLs
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Save generated images
+
+        Args:
+            response (List[str]): List of generated image URLs.
+            name (str): Filename for the images. Defaults to the last prompt.
+            dir (str, optional): Directory for saving images. Defaults to os.getcwd().
+            filenames_prefix (str, optional): String to be prefixed at each filename to be returned.
+
+        Returns:
+            List[str]: List of saved filenames.
+        """
+        assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
+        name = self.prompt if name is None else name
+
+        filenames = []
+        count = 0
+        for img_url in response:
+            def complete_path():
+                count_value = "" if count == 0 else f"_{count}"
+                return os.path.join(dir, name + count_value + "." + self.image_extension)
+
+            while os.path.isfile(complete_path()):
+                count += 1
+
+            absolute_path_to_file = complete_path()
+            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
+
+            # Download and save the image
+            try:
+                img_response = requests.get(img_url, stream=True, timeout=self.timeout)
+                img_response.raise_for_status()
+
+                with open(absolute_path_to_file, "wb") as fh:
+                    for chunk in img_response.iter_content(chunk_size=8192):
+                        fh.write(chunk)
+            except requests.exceptions.RequestException as e:
+                print(f"An error occurred while downloading image from {img_url}: {e}")
+                raise
+
+        return filenames
+
+# Example usage
+if __name__ == "__main__":
+    bot = AmigoImager()
+    try:
+        resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
+        print(bot.save(resp))
+    except Exception as e:
+        print(f"An error occurred: {e}")
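
Unlike WebSimAI above, AmigoImager.generate returns image URLs and defers downloading to save(). A minimal sketch under the same assumptions (import path mirrors the file location; prompt and file name are illustrative):

from webscout.Provider.TTI.amigo import AmigoImager

imager = AmigoImager()
# model must be one of AVAILABLE_MODELS: "dalle-e-3", "flux-pro", "flux-realism"
urls = imager.generate("a foggy mountain pass at sunrise", model="flux-realism")
print(urls)                                     # list of image URLs
print(imager.save(urls, name="mountain_pass"))  # downloads each URL and writes the files
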
webscout/Provider/TTS/parler.py
ADDED
@@ -0,0 +1,108 @@
+import time
+from pathlib import Path
+from typing import Generator
+from playsound import playsound
+from webscout import exceptions
+from webscout.AIbase import TTSProvider
+
+from gradio_client import Client
+import os
+
+
+class ParlerTTS(TTSProvider):
+    """
+    A class to interact with the Parler TTS API through Gradio Client.
+    """
+
+    def __init__(self, timeout: int = 20, proxies: dict = None):
+        """Initializes the Parler TTS client."""
+        self.api_endpoint = "/gen_tts"
+        self.client = Client("parler-tts/parler_tts")  # Initialize the Gradio client
+        self.timeout = timeout
+        self.audio_cache_dir = Path("./audio_cache")
+
+    def tts(self, text: str, description: str = "", use_large: bool = False) -> str:
+        """
+        Converts text to speech using the Parler TTS API.
+
+        Args:
+            text (str): The text to be converted to speech.
+            description (str, optional): Description of the desired voice characteristics. Defaults to "".
+            use_large (bool, optional): Whether to use the large model variant. Defaults to False.
+
+        Returns:
+            str: The filename of the saved audio file.
+
+        Raises:
+            exceptions.FailedToGenerateResponseError: If there is an error generating or saving the audio.
+        """
+        filename = self.audio_cache_dir / f"{int(time.time())}.wav"
+
+        try:
+            result = self.client.predict(
+                text=text,
+                description=description,
+                use_large=use_large,
+                api_name=self.api_endpoint,
+            )
+
+            if isinstance(result, bytes):
+                audio_bytes = result
+            elif isinstance(result, str) and os.path.isfile(result):
+                with open(result, "rb") as f:
+                    audio_bytes = f.read()
+            else:
+                raise ValueError(f"Unexpected response from API: {result}")
+
+            self._save_audio(audio_bytes, filename)
+            return filename.as_posix()
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Error generating audio after multiple retries: {e}"
+            ) from e
+
+    def _save_audio(self, audio_data: bytes, filename: Path):
+        """Saves the audio data to a WAV file in the audio cache directory."""
+        try:
+            self.audio_cache_dir.mkdir(parents=True, exist_ok=True)
+            with open(filename, "wb") as f:
+                f.write(audio_data)
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error saving audio: {e}")
+
+    def play_audio(self, filename: str):
+        """
+        Plays an audio file using playsound.
+
+        Args:
+            filename (str): The path to the audio file.
+
+        Raises:
+            RuntimeError: If there is an error playing the audio.
+        """
+        try:
+            playsound(filename)
+        except Exception as e:
+            raise RuntimeError(f"Error playing audio: {e}")
+
+
+# Example usage
+if __name__ == "__main__":
+    parlertts = ParlerTTS()
+    text = (
+        "All of the data, pre-processing, training code, and weights are released "
+        "publicly under a permissive license, enabling the community to build on our work "
+        "and develop their own powerful models."
+    )
+    voice_description = (
+        "Laura's voice is monotone yet slightly fast in delivery, with a very close "
+        "recording that almost has no background noise."
+    )
+
+    print("Generating audio...")
+    audio_file = parlertts.tts(text, description=voice_description, use_large=False)
+
+    print("Playing audio...")
+    parlertts.play_audio(audio_file)
webscout/Provider/Youchat.py
CHANGED
@@ -160,11 +160,10 @@ class YouChat(Provider):
                         token = data.get('youChatToken', '')
                         if token:
                             streaming_text += token
-
-                            self.last_response.update(resp)
-                            yield value if raw else resp
+                            yield token if raw else dict(text=token)
                 except json.decoder.JSONDecodeError:
                     pass
+        self.last_response.update(dict(text=streaming_text))
         self.conversation.update_chat_history(
             prompt, self.get_message(self.last_response)
         )
@@ -224,7 +223,7 @@ class YouChat(Provider):
         return response["text"]
 if __name__ == '__main__':
     from rich import print
-    ai = YouChat()
-    response = ai.chat("
+    ai = YouChat(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
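
With this change, YouChat.ask yields one dict per streamed token (or the bare token when raw=True) and records the full text in last_response only after the stream ends. A minimal consumption sketch, assuming the rest of the provider interface is unchanged:

from webscout.Provider.Youchat import YouChat

ai = YouChat(timeout=5000)
# each streamed item is {"text": token}
for resp in ai.ask("write a poem about AI", stream=True):
    print(resp["text"], end="", flush=True)
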
webscout/Provider/__init__.py
CHANGED
@@ -16,10 +16,9 @@ from .Phind import PhindSearch
 from .Phind import Phindv2
 from .ai4chat import *
 from .Gemini import GEMINI
-from .Poe import POE
 from .BasedGPT import BasedGPT
 from .Deepseek import DeepSeek
-from .Deepinfra import DeepInfra
+from .Deepinfra import DeepInfra
 from .Farfalle import *
 from .cleeai import *
 from .OLLAMA import OLLAMA
@@ -59,6 +58,10 @@ from .twitterclone import *
 from .tutorai import *
 from .bixin import *
 from .ChatGPTES import *
+from .Amigo import *
+from .prefind import *
+from .bagoodex import *
+from .ChatHub import *
 __all__ = [
     'Farfalle',
     'LLAMA',
@@ -76,12 +79,9 @@ __all__ = [
     'PhindSearch',
     'Felo',
     'GEMINI',
-    'POE',
     'BasedGPT',
     'DeepSeek',
     'DeepInfra',
-    'VLM',
-    'AsyncDeepInfra',
     'AI4Chat',
     'Phindv2',
     'OLLAMA',
@@ -122,6 +122,11 @@ __all__ = [
     'TutorAI',
     'Bixin',
     'ChatGPTES',
+    'AmigoChat',
+    'PrefindAI',
+    'Bagoodex',
+    'ChatHub',
+    # 'LearnFast',
 
 
 ]
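
Given the new imports and __all__ entries above, the added providers should be importable straight from webscout.Provider. A minimal sketch using the Bagoodex class defined later in this diff:

from webscout.Provider import Bagoodex

ai = Bagoodex()
for chunk in ai.chat("write a poem about AI", stream=True):
    print(chunk, end="", flush=True)
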
webscout/Provider/ai4chat.py
CHANGED
@@ -195,5 +195,6 @@ class AI4Chat(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = AI4Chat()
-    response = ai.chat(
-
+    response = ai.chat("write me poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/bagoodex.py
ADDED
@@ -0,0 +1,145 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class Bagoodex(Provider):
+    """
+    A class to interact with the Bagoodex API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,  # Set a reasonable default
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Initializes the Bagoodex API client."""
+        self.url = "https://bagoodex.io/front-api/chat"
+        self.headers = {"Content-Type": "application/json"}
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)  # Use provided proxies
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator:
+        """Sends a chat completion request to the Bagoodex API."""
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+
+        payload = {
+            "prompt": "You are AI",  # This seems to be required by the API
+            "messages": [{"content": "Hi, this is chatgpt, let's talk", "role": "assistant"}],
+            "input": conversation_prompt,
+        }
+
+        def for_stream():
+            try:
+                response = self.session.post(self.url, json=payload, headers=self.headers, timeout=self.timeout)
+                response.raise_for_status()
+                text = response.text
+                self.last_response.update({"text": text})
+
+                if stream:
+                    for char in text:
+                        yield {"text": char}  # Yielding one character at a time for streaming
+                else:
+                    yield {"text": text}
+
+            except (requests.exceptions.RequestException, json.JSONDecodeError) as e:  # Catch JSON errors too
+                raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+        def for_non_stream():
+            for _ in for_stream(): pass
+            return self.last_response
+
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Bagoodex()
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
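
Note that Bagoodex's stream mode is simulated: the whole reply is fetched in a single POST and then yielded character by character, so streaming only changes how the already-complete text is delivered. A minimal non-streaming sketch:

from webscout.Provider.bagoodex import Bagoodex

ai = Bagoodex(timeout=60)
# without stream=True, chat() returns the complete response as a single string
print(ai.chat("Summarize what webscout does in one sentence."))
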
webscout/Provider/bixin.py
CHANGED
@@ -174,7 +174,7 @@ class Bixin(Provider):
                 if text.startswith(previous_text):
                     new_text = text[len(previous_text):]
                     full_response += new_text
-                    yield new_text if raw else dict(text=
+                    yield new_text if raw else dict(text=new_text)
                     previous_text = text
                 else:
                     full_response += text
@@ -258,7 +258,7 @@ class Bixin(Provider):
 if __name__ == "__main__":
     from rich import print
 
-    ai = Bixin()
-    response = ai.chat(
+    ai = Bixin(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/cleeai.py
CHANGED
@@ -142,7 +142,7 @@ class Cleeai(Provider):
         full_response = ''
         for chunk in response.iter_content(chunk_size=self.stream_chunk_size):
             full_response += chunk.decode('utf-8')
-            yield chunk.decode('utf-8') if raw else dict(text=
+            yield chunk.decode('utf-8') if raw else dict(text=chunk.decode('utf-8'))
 
         self.last_response.update(dict(text=full_response))
         self.conversation.update_chat_history(
@@ -206,7 +206,7 @@ class Cleeai(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    ai = Cleeai()
-    response = ai.chat("
+    ai = Cleeai(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
|