webscout 6.0-py3-none-any.whl → 6.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Agents/Onlinesearcher.py +22 -10
- webscout/Agents/functioncall.py +2 -2
- webscout/Bard.py +21 -21
- webscout/Local/__init__.py +6 -7
- webscout/Local/formats.py +404 -194
- webscout/Local/model.py +1074 -477
- webscout/Local/samplers.py +108 -144
- webscout/Local/thread.py +251 -410
- webscout/Local/ui.py +401 -0
- webscout/Local/utils.py +308 -131
- webscout/Provider/Amigo.py +1 -1
- webscout/Provider/NinjaChat.py +200 -0
- webscout/Provider/TTI/Nexra.py +3 -3
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiforce.py +2 -2
- webscout/Provider/TTI/imgninza.py +136 -0
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +8 -1
- webscout/Provider/aimathgpt.py +193 -0
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/gaurish.py +168 -0
- webscout/Provider/geminiprorealtime.py +160 -0
- webscout/Provider/julius.py +4 -0
- webscout/exceptions.py +5 -1
- webscout/utils.py +3 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +154 -123
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/METADATA +123 -120
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/RECORD +33 -28
- webscout/Local/rawdog.py +0 -946
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/LICENSE.md +0 -0
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/WHEEL +0 -0
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/entry_points.txt +0 -0
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/top_level.txt +0 -0
webscout/Provider/gaurish.py
ADDED

@@ -0,0 +1,168 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+
+class GaurishCerebras(Provider):
+    """
+    A class to interact with the Gaurish Cerebras API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        """Initializes the Gaurish Cerebras API client."""
+        self.url = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
+        self.headers = {
+            "Authorization": "Bearer 123",
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream",
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or system_prompt or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.system_prompt = system_prompt  # Store the system prompt
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "model": "llama3.1-70b",
+            "temperature": 0.75,
+            "stream": stream,
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                line = line[6:]
+                                if line == "[DONE]":
+                                    break
+                                try:
+                                    data = json.loads(line)
+                                    if "choices" in data and data["choices"][0]["delta"].get("content"):
+                                        content = data["choices"][0]["delta"]["content"]
+                                        streaming_text += content
+                                        resp = dict(text=content)  # Yield only the new content
+                                        yield content if raw else resp
+                                except json.JSONDecodeError:
+                                    # Skip invalid JSON chunks silently
+                                    pass
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    self.last_response.update({"text": streaming_text})
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    bot = GaurishCerebras()
+    try:
+        response = bot.chat("What is the capital of France?", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")
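For reference, for_stream above consumes OpenAI-style server-sent events: each "data: " line carries a JSON delta, and a literal "[DONE]" terminates the stream. A minimal standalone sketch of that parsing step, using a hypothetical chunk rather than a captured response:

import json

# Hypothetical SSE line in the OpenAI-compatible shape the parser expects.
raw_line = 'data: {"choices": [{"delta": {"content": "Paris"}}]}'

if raw_line.startswith("data: "):
    body = raw_line[len("data: "):]  # strip the "data: " prefix
    if body != "[DONE]":             # "[DONE]" marks end of stream
        chunk = json.loads(body)
        delta = chunk["choices"][0]["delta"].get("content", "")
        print(delta)  # -> Paris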
webscout/Provider/geminiprorealtime.py
ADDED

@@ -0,0 +1,160 @@
+import requests
+import json
+import os
+import secrets
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+
+class GeminiPro(Provider):
+    """
+    A class to interact with the Minitool AI API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Initializes the Minitool AI API client."""
+        self.url = "https://minitoolai.com/test_python/"
+        self.headers = {
+            'authority': 'minitoolai.com',
+            'method': 'POST',
+            'path': '/test_python/',
+            'scheme': 'https',
+            'accept': '*/*',
+            'content-type': 'application/json',
+            'dnt': '1',
+            'origin': 'https://minitoolai.com',
+            'priority': 'u=1, i',
+            'referer': 'https://minitoolai.com/Gemini-Pro/',
+            'sec-ch-ua': '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0',
+            'x-requested-with': 'XMLHttpRequest'
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+        """Sends a chat completion request to the Minitool AI API."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {"utoken": secrets.token_hex(32), "message": conversation_prompt}
+
+        def for_stream():
+            # MinitoolAI doesn't support streaming; emulate with a single yield
+            try:
+                response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                response.raise_for_status()
+                data = response.json()
+                text = data.get("response", "")  # Get response, default to "" if missing
+                self.last_response.update({"text": text})
+                yield {"text": text}  # Yield the entire response
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+            self.conversation.update_chat_history(prompt, text)  # Update chat history
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass  # Consume the generator to update last_response
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+        """Generate response `str`"""
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")  # Handle missing keys
+
+
+if __name__ == "__main__":
+    from rich import print
+    bot = GeminiPro()
+    try:
+        response = bot.chat("tell me about Gpt canvas", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")
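Note that this provider only emulates streaming: the Minitool endpoint returns the whole completion in one JSON response, so chat(..., stream=True) yields exactly one chunk. A hedged usage sketch, assuming the module from the diff above is importable from an installed webscout 6.1:

# Sketch only; class name and module path are taken from the diff above.
from webscout.Provider.geminiprorealtime import GeminiPro

bot = GeminiPro(timeout=60)
print(bot.chat("What is the capital of France?"))  # full reply as one string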
webscout/Provider/julius.py
CHANGED
webscout/exceptions.py
CHANGED
@@ -5,7 +5,9 @@ class WebscoutE(Exception):
 class RatelimitE(Exception):
     """Raised for rate limit exceeded errors during API requests."""
 
-
+class ConversationLimitException(Exception):
+    """Raised for conversation limit exceeded errors during API requests."""
+    pass
 class TimeoutE(Exception):
     """Raised for timeout errors during API requests."""
 

@@ -23,3 +25,5 @@ class FacebookInvalidCredentialsException(Exception):
 class FacebookRegionBlocked(Exception):
     pass
 
+class ModelUnloadedException(Exception):
+    pass
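Both additions follow the module's existing flat style (bare Exception subclasses). A sketch of how downstream code might branch on them; the guarded call itself is purely illustrative:

from webscout.exceptions import ConversationLimitException, ModelUnloadedException

def guarded_call(fn):
    try:
        return fn()
    except ConversationLimitException:
        return None  # conversation limit hit: start a fresh conversation
    except ModelUnloadedException:
        return None  # model was unloaded: reload it before retrying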
webscout/utils.py
CHANGED
@@ -16,6 +16,9 @@ except ImportError:
 
 REGEX_STRIP_TAGS = re.compile("<.*?>")
 
+def _expand_proxy_tb_alias(proxy: str | None) -> str | None:
+    """Expand "tb" to a full proxy URL if applicable."""
+    return "socks5://127.0.0.1:9150" if proxy == "tb" else proxy
 
 def json_dumps(obj: Any) -> str:
     try:
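Here "tb" is shorthand for Tor Browser, whose bundled tor daemon listens on SOCKS5 port 9150 by default. The helper's behavior follows directly from the diff; it is a private function, exercised here for illustration only:

from webscout.utils import _expand_proxy_tb_alias

assert _expand_proxy_tb_alias("tb") == "socks5://127.0.0.1:9150"  # alias expanded
assert _expand_proxy_tb_alias("socks5://1.2.3.4:9050") == "socks5://1.2.3.4:9050"  # passthrough
assert _expand_proxy_tb_alias(None) is None  # None preserved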
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "6.0"
+__version__ = "6.1"
 __prog__ = "webscout"