webscout-6.2b0-py3-none-any.whl → webscout-6.3-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIbase.py +309 -239
- webscout/Agents/functioncall.py +248 -198
- webscout/DWEBS.py +322 -178
- webscout/Extra/gguf.py +250 -60
- webscout/Extra/weather.py +172 -67
- webscout/LLM.py +279 -100
- webscout/Local/formats.py +4 -2
- webscout/Provider/Amigo.py +19 -10
- webscout/Provider/Andi.py +0 -33
- webscout/Provider/Blackboxai.py +4 -204
- webscout/Provider/Llama3.py +1 -1
- webscout/Provider/Marcus.py +137 -0
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/talkai.py +116 -0
- webscout/Provider/__init__.py +10 -3
- webscout/Provider/askmyai.py +158 -0
- webscout/Provider/cerebras.py +71 -58
- webscout/Provider/geminiapi.py +208 -198
- webscout/Provider/llama3mitril.py +181 -0
- webscout/Provider/llmchat.py +203 -0
- webscout/Provider/talkai.py +196 -0
- webscout/Provider/twitterclone.py +7 -6
- webscout/cli.py +354 -346
- webscout/version.py +1 -1
- webscout-6.3.dist-info/LICENSE.md +211 -0
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/METADATA +11 -13
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/RECORD +31 -25
- webscout-6.2b0.dist-info/LICENSE.md +0 -50
- /webscout/Provider/TTI/{AIuncensored.py → AIuncensoredimage.py} +0 -0
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/WHEEL +0 -0
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/entry_points.txt +0 -0
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/top_level.txt +0 -0
webscout/Provider/askmyai.py
ADDED

@@ -0,0 +1,158 @@
+import requests
+import json
+import re
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+
+class AskMyAI(Provider):
+    """
+    A class to interact with the askmyai.chat API. Improved to match webscout standards.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,  # Added max_tokens parameter
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",  # Added system prompt
+    ):
+        """Initializes the AskMyAI API."""
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.askmyai.chat/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt  # Use system prompt
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Accept-Language": "en-US,en;q=0.9",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """Sends a prompt to the askmyai.chat API and returns the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "data": {"datasource": "thucpn"}
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        streaming_response += content
+                        yield content if raw else {"text": content}
+            self.last_response.update({"text": streaming_response})
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            full_response = ""
+            for chunk in for_stream():
+                full_response += chunk if raw else chunk['text']
+            return {"text": full_response}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generates a response from the AskMyAI API."""
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Extracts the message from the API response."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+if __name__ == "__main__":
+    from rich import print
+
+    ai = AskMyAI(timeout=30)
+    response = ai.chat("write a poem about AI", stream=True)
+
+    for chunk in response:
+        print(chunk, end="", flush=True)
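The `0:"(.*?)"` pattern in `ask()` looks like the Vercel AI SDK data-stream framing, in which each text chunk arrives on its own line as a `0:"..."` record. A minimal sketch of that extraction, run against hypothetical sample lines (the real askmyai.chat payloads may differ and should be confirmed against the live endpoint):

import re

# Hypothetical stream lines in the 0:"..." data-stream framing;
# only the 0-prefixed records carry text content.
sample_lines = ['0:"Hello"', '0:" world"', 'd:{"finishReason":"stop"}']

text = ""
for line in sample_lines:
    match = re.search(r'0:"(.*?)"', line)  # same pattern ask() uses
    if match:
        text += match.group(1)

print(text)  # -> Hello world

This would also explain the unescaping in `get_message()`: the captured chunks are raw JSON-string fragments, so literal `\n` escapes arrive undecoded and are translated back into newlines there.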
webscout/Provider/cerebras.py
CHANGED

@@ -3,14 +3,11 @@ import requests
 import json
 import os
 from typing import Any, Dict, Optional, Generator, List, Union
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
 from webscout import exceptions
 from fake_useragent import UserAgent
-from cerebras.cloud.sdk import Cerebras
+from cerebras.cloud.sdk import Cerebras as CerebrasSDK  # type: ignore


 class Cerebras(Provider):
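The `Cerebras as CerebrasSDK` alias above fixes a real name collision: the `class Cerebras(Provider)` statement rebinds the module-level name, so in 6.2b0 the call `Cerebras(api_key=...)` inside `__init__` resolved to the provider class itself rather than the imported SDK client. A self-contained sketch of the shadowing, using stand-in classes rather than the real SDK:

# Stand-in for "from cerebras.cloud.sdk import Cerebras"
class _SDKClient:
    kind = "sdk"

Cerebras = _SDKClient        # the import binds the module-level name

class Cerebras:              # defining the provider class rebinds it
    kind = "provider"

    def lookup(self):
        # Name resolution hits module globals, which now point at the
        # provider class -- not the SDK client the import brought in.
        return Cerebras

print(Cerebras().lookup().kind)  # -> "provider"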
@@ -29,39 +26,36 @@ class Cerebras(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        cookie_path: str = "cookie.json",
-        model: str = "llama3.1-8b",
+        cookie_path: str = "cookie.json",
+        model: str = "llama3.1-8b",
         system_prompt: str = "You are a helpful assistant.",
     ):
-        """
-
-
-        Args:
-            cookie_path (str): Path to the cookie JSON file.
-            model (str, optional): Model name to use. Defaults to 'llama3.1-8b'.
-            system_prompt (str, optional): The system prompt to send with every request. Defaults to "You are a helpful assistant.".
-
-        Raises:
-            FileNotFoundError: If the cookie file is not found.
-            json.JSONDecodeError: If the cookie file has an invalid JSON format.
-            requests.exceptions.RequestException: If there's an error retrieving the API key.
-        """
-        self.api_key = self.get_demo_api_key(cookie_path)
-        self.client = Cerebras(api_key=self.api_key)
+        # Initialize basic settings first
+        self.timeout = timeout
         self.model = model
         self.system_prompt = system_prompt
-
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
         self.last_response = {}

+        # Get API key first
+        try:
+            self.api_key = self.get_demo_api_key(cookie_path)
+            # Set environment variable for the SDK
+            os.environ["CEREBRAS_API_KEY"] = self.api_key
+            # Initialize the client with the API key
+            self.client = CerebrasSDK(api_key=self.api_key)
+        except Exception as e:
+            raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
+
+        # Initialize optimizers
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )

+        # Initialize conversation settings
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -74,12 +68,9 @@ class Cerebras(Provider):
         )
         self.conversation.history_offset = history_offset

-
     @staticmethod
     def extract_query(text: str) -> str:
-        """
-        Extracts the first code block from the given text.
-        """
+        """Extracts the first code block from the given text."""
         pattern = r"```(.*?)```"
         matches = re.findall(pattern, text, re.DOTALL)
         return matches[0].strip() if matches else text.strip()
@@ -97,7 +88,7 @@ class Cerebras(Provider):
         except FileNotFoundError:
             raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
         except json.JSONDecodeError:
-            raise json.JSONDecodeError("Invalid JSON format in the cookie file.")
+            raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)

         headers = {
             "Accept": "*/*",
@@ -130,7 +121,6 @@ class Cerebras(Provider):
         except KeyError:
             raise exceptions.InvalidResponseError("API key not found in response.")

-
     def ask(
         self,
         prompt: str,
@@ -139,7 +129,7 @@ class Cerebras(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict, Generator]:
-
+        """Send a prompt to the model and get a response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -154,30 +144,44 @@ class Cerebras(Provider):
             {"content": conversation_prompt, "role": "user"},
         ]

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        )
+        try:
+            if stream:
+                return self._handle_stream_response(messages)
+            return self._handle_normal_response(messages)
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+
+    def _handle_stream_response(self, messages: List[Dict]) -> Generator:
+        """Handle streaming response from the model."""
+        try:
+            response = self.client.chat.completions.create(
+                messages=messages,
+                model=self.model,
+                stream=True
+            )
+
+            for choice in response.choices:
+                if hasattr(choice, 'delta') and hasattr(choice.delta, 'content') and choice.delta.content:
+                    yield dict(text=choice.delta.content)
+
+            # Update last response with the complete message
+            if hasattr(response.choices[0], 'message'):
                 self.last_response.update({"text": response.choices[0].message.content})
-
-
-
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error during streaming: {e}")

-
+    def _handle_normal_response(self, messages: List[Dict]) -> Dict:
+        """Handle normal (non-streaming) response from the model."""
+        try:
+            response = self.client.chat.completions.create(
+                messages=messages,
+                model=self.model
+            )
+            self.last_response.update({"text": response.choices[0].message.content})
+            return self.last_response
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error during response: {e}")

     def chat(
         self,
@@ -186,6 +190,7 @@ class Cerebras(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator]:
+        """High-level method to chat with the model."""
         return self.get_message(
             self.ask(
                 prompt, stream, optimizer=optimizer, conversationally=conversationally
@@ -193,14 +198,22 @@ class Cerebras(Provider):
         )

     def get_message(self, response: dict) -> str:
-        """Retrieves message
+        """Retrieves message from response."""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]


 if __name__ == "__main__":
     from rich import print
-
-
+
+    # Example usage
+    cerebras = Cerebras(
+        cookie_path='cookie.json',
+        model='llama3.1-8b',
+        system_prompt="You are a helpful AI assistant."
+    )
+
+    # Test with streaming
+    response = cerebras.chat("What is the meaning of life?", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
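One subtle fix in this file is worth noting: `json.JSONDecodeError` requires three constructor arguments (`msg`, `doc`, `pos`), so the 6.2b0 form `raise json.JSONDecodeError("...")` would itself crash with a `TypeError` before the intended error could surface. A quick sketch showing both forms:

import json

# 6.2b0 form: constructing the exception with only a message fails,
# because JSONDecodeError.__init__ requires (msg, doc, pos).
try:
    raise json.JSONDecodeError("Invalid JSON format in the cookie file.")
except TypeError as e:
    print(f"old form breaks: {e}")

# 6.3 form: supplying the document ("") and position (0) raises cleanly.
try:
    raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
except json.JSONDecodeError as e:
    print(f"new form works: {e}")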