webscout 6.2b0-py3-none-any.whl → 6.4-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release. This version of webscout might be problematic.
- webscout/AIauto.py +191 -176
- webscout/AIbase.py +112 -239
- webscout/AIutel.py +488 -1130
- webscout/Agents/functioncall.py +248 -198
- webscout/Bing_search.py +250 -153
- webscout/DWEBS.py +454 -178
- webscout/Extra/__init__.py +2 -1
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder_utiles.py +121 -0
- webscout/Extra/autocoder/rawdog.py +681 -0
- webscout/Extra/autollama.py +246 -195
- webscout/Extra/gguf.py +441 -226
- webscout/Extra/weather.py +172 -67
- webscout/LLM.py +442 -100
- webscout/Litlogger/__init__.py +681 -0
- webscout/Local/formats.py +4 -2
- webscout/Provider/Amigo.py +19 -10
- webscout/Provider/Andi.py +0 -33
- webscout/Provider/Blackboxai.py +4 -204
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/EDITEE.py +1 -1
- webscout/Provider/Llama3.py +1 -1
- webscout/Provider/Marcus.py +137 -0
- webscout/Provider/NinjaChat.py +1 -1
- webscout/Provider/PI.py +221 -207
- webscout/Provider/Perplexity.py +598 -598
- webscout/Provider/RoboCoders.py +206 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
- webscout/Provider/TTI/__init__.py +3 -4
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +184 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
- webscout/Provider/TTI/blackbox/__init__.py +4 -0
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
- webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
- webscout/Provider/TTI/deepinfra/__init__.py +4 -0
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/imgninza/__init__.py +4 -0
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
- webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/__init__.py +146 -132
- webscout/Provider/askmyai.py +158 -0
- webscout/Provider/cerebras.py +227 -206
- webscout/Provider/geminiapi.py +208 -198
- webscout/Provider/llama3mitril.py +180 -0
- webscout/Provider/llmchat.py +203 -0
- webscout/Provider/mhystical.py +176 -0
- webscout/Provider/perplexitylabs.py +265 -0
- webscout/Provider/talkai.py +196 -0
- webscout/Provider/twitterclone.py +251 -244
- webscout/Provider/typegpt.py +359 -0
- webscout/__init__.py +28 -23
- webscout/__main__.py +5 -5
- webscout/cli.py +327 -347
- webscout/conversation.py +227 -0
- webscout/exceptions.py +161 -29
- webscout/litagent/__init__.py +172 -0
- webscout/litprinter/__init__.py +831 -0
- webscout/optimizers.py +270 -0
- webscout/prompt_manager.py +279 -0
- webscout/swiftcli/__init__.py +810 -0
- webscout/transcriber.py +479 -551
- webscout/update_checker.py +125 -0
- webscout/version.py +1 -1
- webscout-6.4.dist-info/LICENSE.md +211 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/METADATA +34 -55
- webscout-6.4.dist-info/RECORD +154 -0
- webscout/Provider/TTI/AIuncensored.py +0 -103
- webscout/Provider/TTI/Nexra.py +0 -120
- webscout/Provider/TTI/PollinationsAI.py +0 -138
- webscout/Provider/TTI/WebSimAI.py +0 -142
- webscout/Provider/TTI/aiforce.py +0 -160
- webscout/Provider/TTI/artbit.py +0 -141
- webscout/Provider/TTI/deepinfra.py +0 -148
- webscout/Provider/TTI/huggingface.py +0 -155
- webscout/models.py +0 -23
- webscout-6.2b0.dist-info/LICENSE.md +0 -50
- webscout-6.2b0.dist-info/RECORD +0 -118
- /webscout/{g4f.py → gpt4free.py} +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/WHEEL +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/entry_points.txt +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/top_level.txt +0 -0
webscout/Provider/TTI/talkai/sync_talkai.py
ADDED
@@ -0,0 +1,207 @@
+import uuid
+import requests
+import json
+import os
+from typing import Any, Dict, List, Optional
+
+from webscout.AIbase import ImageProvider
+from webscout import exceptions
+from webscout.litagent import agent  # Import our fire user agent generator 🔥
+from webscout.Litlogger import LitLogger  # For that cyberpunk logging swag ⚡
+
+# Initialize our fire logger 🚀
+logger = LitLogger("TalkaiImager")
+
+class TalkaiImager(ImageProvider):
+    """
+    TalkAI Image Provider - Your go-to for fire AI art! 🎨
+
+    >>> # Generate some fire art! 🔥
+    >>> imager = TalkaiImager(logging=True)
+    >>> images = imager.generate("Epic dragon breathing fire", amount=2)
+    >>> paths = imager.save(images)
+    >>> print(paths)
+    ['epic_dragon_0.png', 'epic_dragon_1.png']
+
+    >>> # Turn off logging for stealth mode 🥷
+    >>> quiet_imager = TalkaiImager(logging=False)
+    >>> images = quiet_imager.generate("Cyberpunk city at night")
+    >>> paths = quiet_imager.save(images)
+    """
+
+    def __init__(self, timeout: int = 60, proxies: dict = {}, logging: bool = True):
+        """Initialize your TalkAI provider with custom settings! ⚙️
+
+        Args:
+            timeout (int): Request timeout in seconds (default: 60)
+            proxies (dict): Proxy settings for requests (default: {})
+            logging (bool): Enable fire logging (default: True)
+        """
+        self.api_endpoint = "https://talkai.info/chat/send/"
+        self.headers = {
+            'accept': 'application/json',
+            'accept-language': 'en-US,en;q=0.9',
+            'content-type': 'application/json',
+            'origin': 'https://talkai.info',
+            'referer': 'https://talkai.info/image/',
+            'user-agent': agent.random(),  # Using our fire random agent! 🔥
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.prompt: str = "AI-generated image - webscout"
+        self.image_extension: str = "png"
+        self.logging = logging
+        if self.logging:
+            logger.info("TalkaiImager initialized! Ready to create some fire art! 🚀")
+
+    def generate(
+        self, prompt: str, amount: int = 1,
+        max_retries: int = 3, retry_delay: int = 5
+    ) -> List[str]:
+        """Generate some fire images from your prompt! 🎨
+
+        Args:
+            prompt (str): Your creative prompt
+            amount (int): How many images to generate
+            max_retries (int): Max retry attempts if generation fails
+            retry_delay (int): Seconds to wait between retries
+
+        Returns:
+            List[str]: List of image URLs
+        """
+        assert bool(prompt), "Prompt cannot be empty."
+        assert isinstance(amount, int) and amount > 0, "Amount must be a positive integer."
+
+        self.prompt = prompt
+        image_urls = []
+
+        if self.logging:
+            logger.info(f"Generating {amount} images... 🎨")
+
+        for _ in range(amount):
+            for attempt in range(max_retries):
+                try:
+                    with self.session.post(
+                        self.api_endpoint,
+                        json=self._create_payload(prompt),
+                        timeout=self.timeout
+                    ) as response:
+                        response.raise_for_status()
+                        data = response.json()
+
+                        if 'data' in data and len(data['data']) > 0 and 'url' in data['data'][0]:
+                            image_urls.append(data['data'][0]['url'])
+                            if self.logging:
+                                logger.success(f"Generated image {len(image_urls)}/{amount}! 🎨")
+                            break
+                        else:
+                            raise exceptions.InvalidResponseError("No image URL found in API response.")
+
+                except requests.exceptions.RequestException as e:
+                    if attempt == max_retries - 1:
+                        if self.logging:
+                            logger.error(f"Error making API request: {e} 😢")
+                        raise exceptions.APIConnectionError(f"Error making API request: {e}") from e
+                    else:
+                        if self.logging:
+                            logger.warning(f"Attempt {attempt + 1} failed. Retrying in {retry_delay} seconds... 🔄")
+                        import time
+                        time.sleep(retry_delay)
+                except json.JSONDecodeError as e:
+                    if self.logging:
+                        logger.error(f"Invalid JSON response: {e} 😢")
+                    raise exceptions.InvalidResponseError(f"Invalid JSON response: {e}") from e
+                except Exception as e:
+                    if self.logging:
+                        logger.error(f"An unexpected error occurred: {e} 😢")
+                    raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}") from e
+
+        if self.logging:
+            logger.success("All images generated successfully! 🎉")
+        return image_urls
+
+    def _create_payload(self, prompt: str) -> Dict[str, Any]:
+        """Create the API request payload 📦
+
+        Args:
+            prompt (str): The image generation prompt
+
+        Returns:
+            Dict[str, Any]: API request payload
+        """
+        return {
+            "type": "image",
+            "messagesHistory": [
+                {
+                    "id": str(uuid.uuid4()),
+                    "from": "you",
+                    "content": prompt
+                }
+            ],
+            "settings": {
+                "model": "gpt-4o-mini"  # Or another suitable model if available
+            }
+        }
+
+    def save(
+        self,
+        response: List[str],
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Save your fire images! 💾
+
+        Args:
+            response (List[str]): List of image URLs
+            name (str, optional): Base name for saved files
+            dir (str, optional): Where to save the images
+            filenames_prefix (str, optional): Prefix for filenames
+
+        Returns:
+            List[str]: List of saved filenames
+        """
+        assert isinstance(response, list), f"Response should be a list, not {type(response)}"
+        name = self.prompt if name is None else name
+
+        if not os.path.exists(dir):
+            os.makedirs(dir)
+            if self.logging:
+                logger.info(f"Created directory: {dir} 📁")
+
+        if self.logging:
+            logger.info(f"Saving {len(response)} images... 💾")
+
+        filenames = []
+        for i, url in enumerate(response):
+            try:
+                with self.session.get(url, stream=True, timeout=self.timeout) as r:
+                    r.raise_for_status()
+                    filename = f"{filenames_prefix}{name}_{i}.{self.image_extension}"
+                    filepath = os.path.join(dir, filename)
+                    with open(filepath, 'wb') as f:
+                        for chunk in r.iter_content(chunk_size=8192):
+                            f.write(chunk)
+                    filenames.append(filename)
+                    if self.logging:
+                        logger.success(f"Saved image to: {filepath} 💾")
+            except requests.exceptions.RequestException as e:
+                if self.logging:
+                    logger.error(f"Error downloading image from {url}: {e} 😢")
+                filenames.append(None)  # Indicate failure to download
+
+        if self.logging:
+            logger.success(f"All images saved successfully! Check {dir} 🎉")
+        return filenames
+
+
+if __name__ == "__main__":
+    bot = TalkaiImager()
+    try:
+        resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
+        print(bot.save(resp))
+    except Exception as e:
+        if bot.logging:
+            logger.error(f"An error occurred: {e} 😢")
webscout/Provider/__init__.py
CHANGED
@@ -1,133 +1,147 @@
-# webscout/providers/__init__.py
-from .PI import *
-from .Llama import LLAMA
-from .Cohere import Cohere
-from .Reka import REKA
-from .Groq import GROQ
-from .Groq import AsyncGROQ
-from .Openai import OPENAI
-from .Openai import AsyncOPENAI
-from .Koboldai import KOBOLDAI
-from .Koboldai import AsyncKOBOLDAI
[… 121 further removed lines are truncated in the source rendering: the remaining provider imports and the quoted entries of the previous __all__ list …]
+# webscout/providers/__init__.py
+from .PI import *
+from .Llama import LLAMA
+from .Cohere import Cohere
+from .Reka import REKA
+from .Groq import GROQ
+from .Groq import AsyncGROQ
+from .Openai import OPENAI
+from .Openai import AsyncOPENAI
+from .Koboldai import KOBOLDAI
+from .Koboldai import AsyncKOBOLDAI
+from .RoboCoders import RoboCoders
+from .Perplexity import *
+from .perplexitylabs import PerplexityLabs
+from .Blackboxai import BLACKBOXAI
+from .Phind import PhindSearch
+from .Phind import Phindv2
+from .ai4chat import *
+from .Gemini import GEMINI
+from .Deepseek import DeepSeek
+from .Deepinfra import DeepInfra
+from .Farfalle import *
+from .cleeai import *
+from .OLLAMA import OLLAMA
+from .Andi import AndiSearch
+from .PizzaGPT import *
+from .Llama3 import *
+from .DARKAI import *
+from .koala import *
+from .RUBIKSAI import *
+from .meta import *
+from .DiscordRocks import *
+from .felo_search import *
+from .julius import *
+from .Youchat import *
+from .yep import *
+from .Cloudflare import *
+from .turboseek import *
+from .Free2GPT import *
+from .EDITEE import *
+from .TeachAnything import *
+from .AI21 import *
+from .Chatify import *
+from .x0gpt import *
+from .cerebras import *
+from .lepton import *
+from .geminiapi import *
+from .elmo import *
+from .genspark import *
+from .upstage import *
+from .Bing import *
+from .GPTWeb import *
+# from .UNFINISHED.aigames import *
+from .llamatutor import *
+from .promptrefine import *
+from .twitterclone import *
+from .tutorai import *
+from .ChatGPTES import *
+from .Amigo import *
+from .prefind import *
+from .bagoodex import *
+# from .UNFINISHED.ChatHub import *
+from .aimathgpt import *
+from .gaurish import *
+from .geminiprorealtime import *
+from .NinjaChat import *
+from .llmchat import *
+from .talkai import *
+from .askmyai import *
+from .llama3mitril import *
+from .Marcus import *
+from .typegpt import *
+from .mhystical import *
+__all__ = [
+    'Farfalle',
+    'LLAMA',
+    'Cohere',
+    'REKA',
+    'GROQ',
+    'AsyncGROQ',
+    'OPENAI',
+    'AsyncOPENAI',
+    'KOBOLDAI',
+    'AsyncKOBOLDAI',
+    'Perplexity',
+    'PerplexityLabs',
+    'BLACKBOXAI',
+    'PhindSearch',
+    'Felo',
+    'GEMINI',
+    'DeepSeek',
+    'DeepInfra',
+    'AI4Chat',
+    'Phindv2',
+    'OLLAMA',
+    'AndiSearch',
+    'PIZZAGPT',
+    'LLAMA3',
+    'DARKAI',
+    'KOALA',
+    'RUBIKSAI',
+    'Meta',
+    'AskMyAI',
+    'DiscordRocks',
+    'PiAI',
+    'Julius',
+    'YouChat',
+    'YEPCHAT',
+    'Cloudflare',
+    'TurboSeek',
+    'Editee',
+    'TeachAnything',
+    'AI21',
+    'Chatify',
+    'X0GPT',
+    'Cerebras',
+    'Lepton',
+    'GEMINIAPI',
+    'Cleeai',
+    'Elmo',
+    'Genspark',
+    'Upstage',
+    'Free2GPT',
+    'Bing',
+    'GPTWeb',
+    # 'AIGameIO',
+    'LlamaTutor',
+    'PromptRefine',
+    'AIUncensored',
+    'TutorAI',
+    'ChatGPTES',
+    'AmigoChat',
+    'PrefindAI',
+    'Bagoodex',
+    # 'ChatHub',
+    'AIMathGPT',
+    'GaurishCerebras',
+    'GeminiPro',
+    'NinjaChat',
+    'LLMChat',
+    'Talkai',
+    'Llama3Mitril',
+    'Marcus',
+    'RoboCoders',
+    'TypeGPT',
+    'Mhystical',
 ]
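The rewritten __init__.py re-exports every provider through the wildcard imports and the expanded __all__ above. A quick sanity-check sketch (RoboCoders and PerplexityLabs are imported explicitly in the new file; the other names are only assumed to resolve because they appear in __all__):

from webscout import Provider
from webscout.Provider import RoboCoders, PerplexityLabs  # explicit imports in the new __init__.py

# Names exported via wildcard imports; listed in __all__, so they should resolve.
for name in ("AskMyAI", "Talkai", "TypeGPT", "Mhystical"):
    print(name, "->", hasattr(Provider, name))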
webscout/Provider/askmyai.py
ADDED
@@ -0,0 +1,158 @@
+import requests
+import json
+import re
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from fake_useragent import UserAgent
+
+class AskMyAI(Provider):
+    """
+    A class to interact with the askmyai.chat API. Improved to match webscout standards.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,  # Added max_tokens parameter
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",  # Added system prompt
+    ):
+        """Initializes the AskMyAI API."""
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.askmyai.chat/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt  # Use system prompt
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Accept-Language": "en-US,en;q=0.9",
+            'user-agent': UserAgent().random
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """Sends a prompt to the askmyai.chat API and returns the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "data": {"datasource": "thucpn"}
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        streaming_response += content
+                        yield content if raw else {"text": content}
+            self.last_response.update({"text": streaming_response})
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            full_response = ""
+            for chunk in for_stream():
+                full_response += chunk if raw else chunk['text']
+            return {"text": full_response}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generates a response from the AskMyAI API."""
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Extracts the message from the API response."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+if __name__ == "__main__":
+    from rich import print
+
+    ai = AskMyAI(timeout=30)
+    response = ai.chat("write a poem about AI", stream=True)
+
+    for chunk in response:
+        print(chunk, end="", flush=True)